diff --git a/bin/sync_docs.sh b/bin/sync_docs.sh
new file mode 100644
index 0000000000..6b383e4548
--- /dev/null
+++ b/bin/sync_docs.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+######
+# This script synchronizes content to the downstream repository.
+# A Jenkins job configures the source and target repositories and runs this script directly.
+# If you commit changes to this script, verify that the Jenkins job still runs successfully.
+######
+
+# Set the paths to the source and target directories.
+# The source directory contains the content that you want to synchronize.
+source=source
+# The target directory is the location where you want to synchronize content.
+target=target
+
+# Clean the existing downstream folder.
+rm -rf "$target/downstream"
+
+# Copy the content of the downstream folder.
+cp -r "$source/downstream" "$target/downstream"
diff --git a/bin/sync_ocp_latest.sh b/bin/sync_ocp_latest.sh
new file mode 100644
index 0000000000..217c388759
--- /dev/null
+++ b/bin/sync_ocp_latest.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# Set the path to the file that contains the ":OCPLatest:" attribute.
+attributes=../downstream/attributes/attributes.adoc
+
+# Retrieve the OCP attributes file from the OpenShift docs repository; exit if the download fails.
+curl -sSf https://raw.githubusercontent.com/openshift/openshift-docs/main/_attributes/ocp-attributes.adoc -o ocp-attributes.adoc || exit 1
+# Save the value of the "product-version" attribute as a variable.
+ocpversion=$(sed -n -e 's/^:product-version: //p' ocp-attributes.adoc)
+# Replace the value of the "OCPLatest" attribute with the value of the "product-version" attribute.
+sed -i -e "s/^:OCPLatest:.*/:OCPLatest: $ocpversion/" "$attributes"
+# Delete the OCP attributes file.
+rm -f ocp-attributes.adoc
diff --git a/controller-api/swagger.json b/controller-api/swagger.json
new file mode 100644
index 0000000000..27c60215f0
--- /dev/null
+++ b/controller-api/swagger.json
@@ -0,0 +1,49196 @@
+{
+ "basePath": "/",
+ "consumes": [
+ "application/json"
+ ],
+ "definitions": {
+ "ActivityStream": {
+ "properties": {
+ "action_node": {
+ "description": "The cluster node the activity took place on.",
+ "minLength": 1,
+ "readOnly": true,
+ "title": "Action node",
+ "type": "string"
+ },
+ "changes": {
+ "description": "A summary of the new and changed values when an object is created, updated, or deleted",
+ "readOnly": true,
+ "title": "Changes",
+ "type": "string"
+ },
+ "id": {
+ "readOnly": true,
+ "title": "ID",
+ "type": "integer"
+ },
+ "object1": {
+ "description": "For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2.",
+ "minLength": 1,
+ "title": "Object1",
+ "type": "string",
+ "x-nullable": true
+ },
+ "object2": {
+ "description": "Unpopulated for create, update, and delete events.
For associate and disassociate events this is the object type that object1 is being associated with.", + "minLength": 1, + "title": "Object2", + "type": "string", + "x-nullable": true + }, + "object_association": { + "description": "When present, shows the field name of the role or relationship that changed.", + "readOnly": true, + "title": "Object association", + "type": "string" + }, + "object_type": { + "description": "When present, shows the model on which the role or relationship was defined.", + "readOnly": true, + "title": "Object type", + "type": "string" + }, + "operation": { + "description": "The action taken with respect to the given object(s).", + "enum": [ + "create", + "update", + "delete", + "associate", + "disassociate" + ], + "title": "Operation", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timestamp": { + "format": "date-time", + "readOnly": true, + "title": "Timestamp", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "operation", + "object1", + "object2" + ], + "type": "object" + }, + "AdHocCommandCancel": { + "properties": { + "can_cancel": { + "readOnly": true, + "title": "Can cancel", + "type": "boolean" + } + }, + "type": "object" + }, + "AdHocCommandDetail": { + "properties": { + "become_enabled": { + "default": false, + "title": "Become enabled", + "type": "boolean", + "x-nullable": true + }, + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "controller_node": { + "description": "The instance that managed the execution environment.", + "minLength": 1, + "readOnly": true, + "title": "Controller node", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential": { + "title": "Credential", + "type": "integer", + "x-nullable": true + }, + "diff_mode": { + "default": false, + "title": "Diff mode", + "type": "boolean", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "event_processing_finished": { + "description": "Indicates whether all of the events generated by this unified job have been saved to the database.", + "readOnly": true, + "title": "Event processing finished", + "type": "boolean" + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "execution_node": { + "description": "The node the job executed on.", + "minLength": 1, + "readOnly": true, + "title": "Execution node", + "type": "string" + }, + "extra_vars": { + "default": "", + "title": "Extra vars", + "type": "string", + "x-nullable": true + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "forks": { + "default": 0, + "minimum": 0, + "title": "Forks", + "type": "integer" + 
}, + "host_status_counts": { + "description": "Playbook stats from the Ansible playbook_on_stats event.", + "readOnly": true, + "title": "Host status counts", + "type": "object", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory": { + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "job_args": { + "minLength": 1, + "readOnly": true, + "title": "Job args", + "type": "string" + }, + "job_cwd": { + "minLength": 1, + "readOnly": true, + "title": "Job cwd", + "type": "string" + }, + "job_env": { + "readOnly": true, + "title": "Job env", + "type": "object" + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "job_type": { + "default": "run", + "enum": [ + "run", + "check" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "limit": { + "default": "", + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "module_args": { + "default": "", + "title": "Module args", + "type": "string", + "x-nullable": true + }, + "module_name": { + "default": "command", + "enum": [ + "command", + "shell", + "yum", + "apt", + "apt_key", + "apt_repository", + "apt_rpm", + "service", + "group", + "user", + "mount", + "ping", + "selinux", + "setup", + "win_ping", + "win_service", + "win_updates", + "win_group", + "win_user" + ], + "title": "Module name", + "type": "string" + }, + "name": { + "minLength": 1, + "readOnly": true, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "result_traceback": { + "minLength": 1, + "readOnly": true, + "title": "Result traceback", + "type": "string" + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "verbosity": { + "default": 0, + "enum": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "title": "Verbosity", + "type": "integer" + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "type": "object" + }, + "AdHocCommandEvent": { + "properties": { + "ad_hoc_command": { + "readOnly": true, + "title": "Ad hoc command", + "type": "string" + }, + "changed": { + "readOnly": true, + "title": "Changed", + "type": "boolean" + }, + "counter": { + "minimum": 0, + "readOnly": true, + 
"title": "Counter", + "type": "integer" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "end_line": { + "minimum": 0, + "readOnly": true, + "title": "End line", + "type": "integer" + }, + "event": { + "enum": [ + "runner_on_failed", + "runner_on_ok", + "runner_on_unreachable", + "runner_on_skipped", + "debug", + "verbose", + "deprecated", + "warning", + "system_warning", + "error" + ], + "title": "Event", + "type": "string", + "x-nullable": true + }, + "event_data": { + "default": {}, + "title": "Event data", + "type": "object" + }, + "event_display": { + "minLength": 1, + "readOnly": true, + "title": "Event display", + "type": "string" + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "host": { + "readOnly": true, + "title": "Host", + "type": "integer", + "x-nullable": true + }, + "host_name": { + "minLength": 1, + "readOnly": true, + "title": "Host name", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "start_line": { + "minimum": 0, + "readOnly": true, + "title": "Start line", + "type": "integer" + }, + "stdout": { + "minLength": 1, + "readOnly": true, + "title": "Stdout", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "uuid": { + "minLength": 1, + "readOnly": true, + "title": "Uuid", + "type": "string" + }, + "verbosity": { + "minimum": 0, + "readOnly": true, + "title": "Verbosity", + "type": "integer" + } + }, + "required": [ + "event" + ], + "type": "object" + }, + "AdHocCommandList": { + "properties": { + "become_enabled": { + "default": false, + "title": "Become enabled", + "type": "boolean", + "x-nullable": true + }, + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "controller_node": { + "description": "The instance that managed the execution environment.", + "minLength": 1, + "readOnly": true, + "title": "Controller node", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential": { + "title": "Credential", + "type": "integer", + "x-nullable": true + }, + "diff_mode": { + "default": false, + "title": "Diff mode", + "type": "boolean", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "execution_node": { + "description": "The node the job executed on.", + "minLength": 1, + "readOnly": true, + "title": "Execution node", + "type": "string" + }, + "extra_vars": { + "default": "", + "title": "Extra vars", + "type": "string", + "x-nullable": true + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": 
true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "forks": { + "default": 0, + "minimum": 0, + "title": "Forks", + "type": "integer" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory": { + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "job_type": { + "default": "run", + "enum": [ + "run", + "check" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "limit": { + "default": "", + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "module_args": { + "default": "", + "title": "Module args", + "type": "string", + "x-nullable": true + }, + "module_name": { + "default": "command", + "enum": [ + "command", + "shell", + "yum", + "apt", + "apt_key", + "apt_repository", + "apt_rpm", + "service", + "group", + "user", + "mount", + "ping", + "selinux", + "setup", + "win_ping", + "win_service", + "win_updates", + "win_group", + "win_user" + ], + "title": "Module name", + "type": "string" + }, + "name": { + "minLength": 1, + "readOnly": true, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "verbosity": { + "default": 0, + "enum": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "title": "Verbosity", + "type": "integer" + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "type": "object" + }, + "AdHocCommandRelaunch": { + "properties": {}, + "type": "object" + }, + "AnsibleFacts": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary 
fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "BulkHost": { + "properties": { + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "enabled": { + "default": true, + "description": "Is this host online and available for running jobs?", + "title": "Enabled", + "type": "boolean", + "x-nullable": true + }, + "instance_id": { + "default": "", + "description": "The value used by the remote inventory source to uniquely identify the host", + "maxLength": 1024, + "title": "Instance id", + "type": "string", + "x-nullable": true + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "variables": { + "default": "", + "description": "Host variables in JSON or YAML format.", + "title": "Variables", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "BulkHostCreate": { + "properties": { + "hosts": { + "description": "List of hosts to be created, JSON. e.g. [{\"name\": \"example.com\"}, {\"name\": \"127.0.0.1\"}]", + "items": { + "$ref": "#/definitions/BulkHost" + }, + "maxItems": 100000, + "type": "array" + }, + "inventory": { + "description": "Primary Key ID of inventory to add hosts to.", + "title": "Inventory", + "type": "integer" + } + }, + "required": [ + "inventory", + "hosts" + ], + "type": "object" + }, + "BulkJobLaunch": { + "properties": { + "description": { + "minLength": 1, + "title": "Description", + "type": "string" + }, + "extra_vars": { + "title": "Extra vars", + "type": "object" + }, + "inventory": { + "title": "Inventory", + "type": "integer" + }, + "job_tags": { + "minLength": 1, + "title": "Job tags", + "type": "string" + }, + "jobs": { + "description": "List of jobs to be launched, JSON. e.g. [{\"unified_job_template\": 7}, {\"unified_job_template\": 10}]", + "items": { + "$ref": "#/definitions/BulkJobNode" + }, + "type": "array" + }, + "limit": { + "minLength": 1, + "title": "Limit", + "type": "string" + }, + "name": { + "default": "Bulk Job Launch", + "maxLength": 512, + "title": "Name", + "type": "string" + }, + "organization": { + "description": "Inherit permissions from this organization. If not provided, a organization the user is a member of will be selected automatically.", + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "scm_branch": { + "minLength": 1, + "title": "Scm branch", + "type": "string" + }, + "skip_tags": { + "minLength": 1, + "title": "Skip tags", + "type": "string" + } + }, + "required": [ + "jobs" + ], + "type": "object" + }, + "BulkJobNode": { + "description": "List of jobs to be launched, JSON. e.g. 
[{\"unified_job_template\": 7}, {\"unified_job_template\": 10}]", + "properties": { + "all_parents_must_converge": { + "default": false, + "description": "If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node", + "title": "All parents must converge", + "type": "boolean", + "x-nullable": true + }, + "always_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credentials": { + "items": { + "minimum": 1, + "type": "integer" + }, + "type": "array" + }, + "diff_mode": { + "title": "Diff mode", + "type": "boolean", + "x-nullable": true + }, + "do_not_run": { + "default": false, + "description": "Indicates that a job will not be created when True. Workflow runtime semantics will mark this True if the node is in a path that will decidedly not be ran. A value of False means the node may not run.", + "title": "Do not run", + "type": "boolean", + "x-nullable": true + }, + "execution_environment": { + "minimum": 1, + "title": "Execution environment", + "type": "integer" + }, + "extra_data": { + "default": {}, + "title": "Extra data", + "type": "object" + }, + "failure_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "forks": { + "minimum": 0, + "title": "Forks", + "type": "integer", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "identifier": { + "description": "An identifier coresponding to the workflow job template node that this node was created from.", + "maxLength": 512, + "title": "Identifier", + "type": "string", + "x-nullable": true + }, + "instance_groups": { + "items": { + "minimum": 1, + "type": "integer" + }, + "type": "array" + }, + "inventory": { + "minimum": 1, + "title": "Inventory", + "type": "integer" + }, + "job": { + "title": "Job", + "type": "integer", + "x-nullable": true + }, + "job_slice_count": { + "minimum": 0, + "title": "Job slice count", + "type": "integer", + "x-nullable": true + }, + "job_tags": { + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_type": { + "enum": [ + "run", + "check" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "labels": { + "items": { + "minimum": 1, + "type": "integer" + }, + "type": "array" + }, + "limit": { + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "skip_tags": { + "title": "Skip tags", + "type": "string", + "x-nullable": true + }, + "success_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "title": "Timeout", + "type": "integer", + "x-nullable": true + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "description": "Primary key of the template for this job, can be a job template or inventory source.", + "minimum": 1, + "title": "Unified job template", + "type": "integer" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "verbosity": { + "enum": 
[ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "title": "Verbosity", + "type": "string", + "x-nullable": true + }, + "workflow_job": { + "title": "Workflow job", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "unified_job_template" + ], + "type": "object" + }, + "ConstructedInventory": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "has_active_failures": { + "description": "This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed.", + "readOnly": true, + "title": "Has active failures", + "type": "boolean" + }, + "has_inventory_sources": { + "description": "This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources.", + "readOnly": true, + "title": "Has inventory sources", + "type": "boolean" + }, + "hosts_with_active_failures": { + "description": "This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures.", + "minimum": 0, + "readOnly": true, + "title": "Hosts with active failures", + "type": "integer" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory_sources_with_failures": { + "description": "Number of external inventory sources in this inventory with failures.", + "minimum": 0, + "readOnly": true, + "title": "Inventory sources with failures", + "type": "integer" + }, + "kind": { + "description": "Kind of inventory being represented.", + "enum": [ + "", + "smart", + "constructed" + ], + "readOnly": true, + "title": "Kind", + "type": "string", + "x-nullable": true + }, + "limit": { + "description": "The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory.", + "title": "Limit", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "organization": { + "description": "Organization containing this inventory.", + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "pending_deletion": { + "description": "Flag indicating the inventory is being deleted.", + "readOnly": true, + "title": "Pending deletion", + "type": "boolean" + }, + "prevent_instance_group_fallback": { + "default": false, + "description": "If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instances groups to run associated job templates on.If this setting is enabled and you provided an empty list, the global instance groups will be applied.", + "title": "Prevent instance group fallback", + "type": "boolean", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "source_vars": { + "description": "The source_vars for the related auto-created inventory source, special to constructed inventory.", + "title": "Source vars", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "total_groups": { + "description": "This field is deprecated and will be removed in a future release. 
Total number of groups in this inventory.", + "minimum": 0, + "readOnly": true, + "title": "Total groups", + "type": "integer" + }, + "total_hosts": { + "description": "This field is deprecated and will be removed in a future release. Total number of hosts in this inventory.", + "minimum": 0, + "readOnly": true, + "title": "Total hosts", + "type": "integer" + }, + "total_inventory_sources": { + "description": "Total number of external inventory sources configured within this inventory.", + "minimum": 0, + "readOnly": true, + "title": "Total inventory sources", + "type": "integer" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "update_cache_timeout": { + "description": "The cache timeout for the related auto-created inventory source, special to constructed inventory", + "minimum": 0, + "title": "Update cache timeout", + "type": "integer", + "x-nullable": true + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "variables": { + "default": "", + "description": "Inventory variables in JSON or YAML format.", + "title": "Variables", + "type": "string", + "x-nullable": true + }, + "verbosity": { + "description": "The verbosity level for the related auto-created inventory source, special to constructed inventory", + "maximum": 2, + "minimum": 0, + "title": "Verbosity", + "type": "integer", + "x-nullable": true + } + }, + "required": [ + "name", + "organization" + ], + "type": "object" + }, + "Copy": { + "properties": { + "name": { + "minLength": 1, + "title": "Name", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "Credential": { + "properties": { + "cloud": { + "readOnly": true, + "title": "Cloud", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential_type": { + "description": "Specify the type of credential you want to create. Refer to the documentation for details on each type.", + "title": "Credential Type", + "type": "integer" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inputs": { + "default": {}, + "description": "Enter inputs using either JSON or YAML syntax. 
Refer to the documentation for example syntax.", + "title": "Inputs", + "type": "object" + }, + "kind": { + "readOnly": true, + "title": "Kind", + "type": "string" + }, + "kubernetes": { + "readOnly": true, + "title": "Kubernetes", + "type": "string" + }, + "managed": { + "readOnly": true, + "title": "Managed", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "organization": { + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name", + "credential_type" + ], + "type": "object" + }, + "CredentialInputSource": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "input_field_name": { + "maxLength": 1024, + "minLength": 1, + "title": "Input field name", + "type": "string", + "x-nullable": true + }, + "metadata": { + "default": {}, + "title": "Metadata", + "type": "object" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "source_credential": { + "title": "Source credential", + "type": "integer", + "x-nullable": true + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "target_credential": { + "title": "Target credential", + "type": "integer", + "x-nullable": true + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "input_field_name", + "target_credential", + "source_credential" + ], + "type": "object" + }, + "CredentialSerializerCreate": { + "properties": { + "cloud": { + "readOnly": true, + "title": "Cloud", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential_type": { + "description": "Specify the type of credential you want to create. Refer to the documentation for details on each type.", + "title": "Credential Type", + "type": "integer" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inputs": { + "default": {}, + "description": "Enter inputs using either JSON or YAML syntax. 
Refer to the documentation for example syntax.", + "title": "Inputs", + "type": "object" + }, + "kind": { + "readOnly": true, + "title": "Kind", + "type": "string" + }, + "kubernetes": { + "readOnly": true, + "title": "Kubernetes", + "type": "string" + }, + "managed": { + "readOnly": true, + "title": "Managed", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "organization": { + "description": "Inherit permissions from organization roles. If provided on creation, do not give either user or team.", + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "team": { + "description": "Write-only field used to add team to owner role. If provided, do not give either user or organization. Only valid for creation.", + "title": "Team", + "type": "integer", + "x-nullable": true + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "user": { + "description": "Write-only field used to add user to owner role. If provided, do not give either team or organization. Only valid for creation.", + "title": "User", + "type": "integer", + "x-nullable": true + } + }, + "required": [ + "name", + "credential_type" + ], + "type": "object" + }, + "CredentialType": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "injectors": { + "default": {}, + "description": "Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax.", + "title": "Injectors", + "type": "object" + }, + "inputs": { + "default": {}, + "description": "Enter inputs using either JSON or YAML syntax. 
Refer to the documentation for example syntax.", + "title": "Inputs", + "type": "object" + }, + "kind": { + "enum": [ + "ssh", + "vault", + "net", + "scm", + "cloud", + "registry", + "token", + "insights", + "external", + "kubernetes", + "galaxy", + "cryptography" + ], + "title": "Kind", + "type": "string", + "x-nullable": true + }, + "managed": { + "readOnly": true, + "title": "Managed", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "namespace": { + "minLength": 1, + "readOnly": true, + "title": "Namespace", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name", + "kind" + ], + "type": "object" + }, + "Empty": { + "properties": {}, + "type": "object" + }, + "ExecutionEnvironment": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential": { + "title": "Credential", + "type": "integer", + "x-nullable": true + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "image": { + "description": "The full image location, including the container registry, image name, and version tag.", + "maxLength": 1024, + "minLength": 1, + "title": "Image location", + "type": "string", + "x-nullable": true + }, + "managed": { + "readOnly": true, + "title": "Managed", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "organization": { + "description": "The organization used to determine access to this execution environment.", + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "pull": { + "default": "", + "description": "Pull image before running?", + "enum": [ + "always", + "missing", + "never" + ], + "title": "Pull", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name", + "image" + ], + "type": "object" + }, + "Group": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory": { + "title": "Inventory", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + 
"title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "variables": { + "default": "", + "description": "Group variables in JSON or YAML format.", + "title": "Variables", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name", + "inventory" + ], + "type": "object" + }, + "GroupTree": { + "properties": { + "children": { + "readOnly": true, + "title": "Children", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory": { + "title": "Inventory", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "variables": { + "default": "", + "description": "Group variables in JSON or YAML format.", + "title": "Variables", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name", + "inventory" + ], + "type": "object" + }, + "GroupVariableData": { + "properties": { + "variables": { + "default": "", + "description": "Group variables in JSON or YAML format.", + "title": "Variables", + "type": "string", + "x-nullable": true + } + }, + "type": "object" + }, + "Host": { + "properties": { + "ansible_facts_modified": { + "description": "The date and time ansible_facts was last modified.", + "format": "date-time", + "readOnly": true, + "title": "Ansible facts modified", + "type": "string", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "enabled": { + "default": true, + "description": "Is this host online and available for running jobs?", + "title": "Enabled", + "type": "boolean", + "x-nullable": true + }, + "has_active_failures": { + "readOnly": true, + "title": "Has active failures", + "type": "string" + }, + "has_inventory_sources": { + "readOnly": true, + "title": "Has inventory sources", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "instance_id": { + "default": "", + "description": "The value used by the remote inventory source to uniquely identify the host", + "maxLength": 1024, + "title": "Instance id", + "type": "string", + "x-nullable": true + }, + "inventory": { + "title": "Inventory", + "type": "integer" + }, + "last_job": { + "readOnly": true, + "title": "Last job", + "type": "string", + "x-nullable": true + }, + "last_job_host_summary": { + "readOnly": true, + "title": "Last job host summary", + "type": "integer", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + 
"title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "variables": { + "default": "", + "description": "Host variables in JSON or YAML format.", + "title": "Variables", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name", + "inventory" + ], + "type": "object" + }, + "HostMetric": { + "properties": { + "automated_counter": { + "default": 0, + "description": "How many times was the host automated", + "title": "Automated counter", + "type": "integer" + }, + "deleted": { + "default": false, + "description": "Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption", + "title": "Deleted", + "type": "boolean", + "x-nullable": true + }, + "deleted_counter": { + "default": 0, + "description": "How many times was the host deleted", + "title": "Deleted counter", + "type": "integer" + }, + "first_automation": { + "description": "When the host was first automated against", + "format": "date-time", + "readOnly": true, + "title": "First automation", + "type": "string" + }, + "hostname": { + "maxLength": 512, + "minLength": 1, + "title": "Hostname", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "last_automation": { + "description": "When the host was last automated against", + "format": "date-time", + "title": "Last automation", + "type": "string" + }, + "last_deleted": { + "description": "When the host was last deleted", + "format": "date-time", + "title": "Last deleted", + "type": "string", + "x-nullable": true + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "used_in_inventories": { + "description": "How many inventories contain this host", + "title": "Used in inventories", + "type": "integer", + "x-nullable": true + } + }, + "required": [ + "hostname", + "last_automation" + ], + "type": "object" + }, + "HostMetricSummaryMonthly": { + "properties": { + "date": { + "format": "date", + "readOnly": true, + "title": "Date", + "type": "string" + }, + "hosts_added": { + "description": "How many hosts were added in the associated month, consuming more license capacity", + "readOnly": true, + "title": "Hosts added", + "type": "integer" + }, + "hosts_deleted": { + "description": "How many hosts were deleted in the associated month, freeing the license capacity", + "readOnly": true, + "title": "Hosts deleted", + "type": "integer" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "indirectly_managed_hosts": { + "description": "Manually entered number indirectly managed hosts for a certain month", + "readOnly": true, + "title": "Indirectly managed hosts", + "type": "integer" + }, + "license_capacity": { + "description": "'License capacity as max. 
number of unique hosts", + "readOnly": true, + "title": "License capacity", + "type": "integer" + }, + "license_consumed": { + "description": "How many unique hosts are consumed from the license", + "readOnly": true, + "title": "License consumed", + "type": "integer" + } + }, + "type": "object" + }, + "HostVariableData": { + "properties": { + "variables": { + "default": "", + "description": "Host variables in JSON or YAML format.", + "title": "Variables", + "type": "string", + "x-nullable": true + } + }, + "type": "object" + }, + "Instance": { + "properties": { + "capacity": { + "minimum": 0, + "readOnly": true, + "title": "Capacity", + "type": "integer" + }, + "capacity_adjustment": { + "default": "1.00", + "format": "decimal", + "title": "Capacity adjustment", + "type": "string" + }, + "consumed_capacity": { + "readOnly": true, + "title": "Consumed capacity", + "type": "string" + }, + "cpu": { + "format": "decimal", + "readOnly": true, + "title": "Cpu", + "type": "string" + }, + "cpu_capacity": { + "readOnly": true, + "title": "Cpu capacity", + "type": "integer" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "enabled": { + "default": true, + "title": "Enabled", + "type": "boolean", + "x-nullable": true + }, + "errors": { + "description": "Any error details from the last health check.", + "minLength": 1, + "readOnly": true, + "title": "Errors", + "type": "string" + }, + "health_check_pending": { + "readOnly": true, + "title": "Health check pending", + "type": "string" + }, + "health_check_started": { + "description": "The last time a health check was initiated on this instance.", + "format": "date-time", + "readOnly": true, + "title": "Health check started", + "type": "string", + "x-nullable": true + }, + "hostname": { + "maxLength": 250, + "minLength": 1, + "title": "Hostname", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "ip_address": { + "minLength": 1, + "readOnly": true, + "title": "Ip address", + "type": "string", + "x-nullable": true + }, + "jobs_running": { + "description": "Count of jobs in the running or waiting state that are targeted for this instance", + "readOnly": true, + "title": "Jobs running", + "type": "integer" + }, + "jobs_total": { + "description": "Count of all jobs that target this instance", + "readOnly": true, + "title": "Jobs total", + "type": "integer" + }, + "last_health_check": { + "description": "Last time a health check was ran on this instance to refresh cpu, memory, and capacity.", + "format": "date-time", + "readOnly": true, + "title": "Last health check", + "type": "string", + "x-nullable": true + }, + "last_seen": { + "description": "Last time instance ran its heartbeat task for main cluster nodes. 
Last known connection to receptor mesh for execution nodes.", + "format": "date-time", + "readOnly": true, + "title": "Last seen", + "type": "string", + "x-nullable": true + }, + "listener_port": { + "description": "Port that Receptor will listen for incoming connections on.", + "maximum": 65535, + "minimum": 0, + "title": "Listener port", + "type": "integer", + "x-nullable": true + }, + "managed_by_policy": { + "default": true, + "title": "Managed by policy", + "type": "boolean", + "x-nullable": true + }, + "mem_capacity": { + "readOnly": true, + "title": "Mem capacity", + "type": "integer" + }, + "memory": { + "description": "Total system memory of this instance in bytes.", + "readOnly": true, + "title": "Memory", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "node_state": { + "default": "installed", + "description": "Indicates the current life cycle stage of this instance.", + "enum": [ + "provisioning", + "provision-fail", + "installed", + "ready", + "unavailable", + "deprovisioning", + "deprovision-fail" + ], + "title": "Node state", + "type": "string", + "x-nullable": true + }, + "node_type": { + "default": "execution", + "description": "Role that this node plays in the mesh.", + "enum": [ + "control", + "execution", + "hybrid", + "hop" + ], + "title": "Node type", + "type": "string", + "x-nullable": true + }, + "peers": { + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + "peers_from_control_nodes": { + "default": false, + "description": "If True, control plane cluster nodes should automatically peer to it.", + "title": "Peers from control nodes", + "type": "boolean", + "x-nullable": true + }, + "percent_capacity_remaining": { + "readOnly": true, + "title": "Percent capacity remaining", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "uuid": { + "minLength": 1, + "readOnly": true, + "title": "Uuid", + "type": "string", + "x-nullable": true + }, + "version": { + "minLength": 1, + "readOnly": true, + "title": "Version", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "hostname" + ], + "type": "object" + }, + "InstanceGroup": { + "properties": { + "capacity": { + "readOnly": true, + "title": "Capacity", + "type": "string" + }, + "consumed_capacity": { + "readOnly": true, + "title": "Consumed capacity", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential": { + "title": "Credential", + "type": "integer", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "instances": { + "readOnly": true, + "title": "Instances", + "type": "string" + }, + "is_container_group": { + "description": "Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster.", + "title": "Is container group", + "type": "boolean" + }, + "jobs_running": { + "readOnly": true, + "title": "Jobs running", + "type": "string" + }, + "jobs_total": { + "description": "Count of all jobs that target this instance group", + "readOnly": true, + "title": "Jobs total", + "type": "integer" + }, + "max_concurrent_jobs": { + "default": 0, + 
"description": "Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced.", + "minimum": 0, + "title": "Max Concurrent Jobs", + "type": "integer" + }, + "max_forks": { + "default": 0, + "description": "Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced.", + "minimum": 0, + "title": "Max Forks", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 250, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "percent_capacity_remaining": { + "readOnly": true, + "title": "Percent capacity remaining", + "type": "string" + }, + "pod_spec_override": { + "default": "", + "title": "Pod spec override", + "type": "string", + "x-nullable": true + }, + "policy_instance_list": { + "description": "List of exact-match Instances that will be assigned to this group", + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "policy_instance_minimum": { + "default": 0, + "description": "Static minimum number of Instances that will be automatically assign to this group when new instances come online.", + "minimum": 0, + "title": "Policy Instance Minimum", + "type": "integer" + }, + "policy_instance_percentage": { + "default": 0, + "description": "Minimum percentage of all instances that will be automatically assigned to this group when new instances come online.", + "maximum": 100, + "minimum": 0, + "title": "Policy Instance Percentage", + "type": "integer" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "InstanceHealthCheck": { + "properties": { + "capacity": { + "readOnly": true, + "title": "Capacity", + "type": "integer" + }, + "cpu": { + "format": "decimal", + "readOnly": true, + "title": "Cpu", + "type": "string" + }, + "cpu_capacity": { + "readOnly": true, + "title": "Cpu capacity", + "type": "integer" + }, + "errors": { + "description": "Any error details from the last health check.", + "minLength": 1, + "readOnly": true, + "title": "Errors", + "type": "string" + }, + "hostname": { + "minLength": 1, + "readOnly": true, + "title": "Hostname", + "type": "string", + "x-nullable": true + }, + "ip_address": { + "minLength": 1, + "readOnly": true, + "title": "Ip address", + "type": "string", + "x-nullable": true + }, + "last_health_check": { + "description": "Last time a health check was ran on this instance to refresh cpu, memory, and capacity.", + "format": "date-time", + "readOnly": true, + "title": "Last health check", + "type": "string", + "x-nullable": true + }, + "mem_capacity": { + "readOnly": true, + "title": "Mem capacity", + "type": "integer" + }, + "memory": { + "description": "Total system memory of this instance in bytes.", + "readOnly": true, + "title": "Memory", + "type": "integer" + }, + "uuid": { + "minLength": 1, + "readOnly": true, + "title": "Uuid", + "type": "string", + "x-nullable": true + }, + "version": { + "minLength": 1, + "readOnly": true, + "title": "Version", + "type": "string", + "x-nullable": true + } + }, + "type": "object" + }, + "Inventory": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + 
"type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "has_active_failures": { + "description": "This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed.", + "readOnly": true, + "title": "Has active failures", + "type": "boolean" + }, + "has_inventory_sources": { + "description": "This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources.", + "readOnly": true, + "title": "Has inventory sources", + "type": "boolean" + }, + "host_filter": { + "description": "Filter that will be applied to the hosts of this inventory.", + "title": "Host filter", + "type": "string", + "x-nullable": true + }, + "hosts_with_active_failures": { + "description": "This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures.", + "minimum": 0, + "readOnly": true, + "title": "Hosts with active failures", + "type": "integer" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory_sources_with_failures": { + "description": "Number of external inventory sources in this inventory with failures.", + "minimum": 0, + "readOnly": true, + "title": "Inventory sources with failures", + "type": "integer" + }, + "kind": { + "default": "", + "description": "Kind of inventory being represented.", + "enum": [ + "", + "smart", + "constructed" + ], + "title": "Kind", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "organization": { + "description": "Organization containing this inventory.", + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "pending_deletion": { + "description": "Flag indicating the inventory is being deleted.", + "readOnly": true, + "title": "Pending deletion", + "type": "boolean" + }, + "prevent_instance_group_fallback": { + "default": false, + "description": "If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instances groups to run associated job templates on.If this setting is enabled and you provided an empty list, the global instance groups will be applied.", + "title": "Prevent instance group fallback", + "type": "boolean", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "total_groups": { + "description": "This field is deprecated and will be removed in a future release. Total number of groups in this inventory.", + "minimum": 0, + "readOnly": true, + "title": "Total groups", + "type": "integer" + }, + "total_hosts": { + "description": "This field is deprecated and will be removed in a future release. 
Total number of hosts in this inventory.", + "minimum": 0, + "readOnly": true, + "title": "Total hosts", + "type": "integer" + }, + "total_inventory_sources": { + "description": "Total number of external inventory sources configured within this inventory.", + "minimum": 0, + "readOnly": true, + "title": "Total inventory sources", + "type": "integer" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "variables": { + "default": "", + "description": "Inventory variables in JSON or YAML format.", + "title": "Variables", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name", + "organization" + ], + "type": "object" + }, + "InventoryScript": { + "properties": {}, + "type": "object" + }, + "InventorySource": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential": { + "description": "Cloud credential to use for inventory updates.", + "minimum": 1, + "title": "Credential", + "type": "integer", + "x-nullable": true + }, + "custom_virtualenv": { + "description": "Local absolute file path containing a custom Python virtualenv to use", + "minLength": 1, + "readOnly": true, + "title": "Custom virtualenv", + "type": "string", + "x-nullable": true + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "enabled_value": { + "default": "", + "description": "Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var=\"status.power_state\"and enabled_value=\"powered_on\" with host variables:{ \"status\": { \"power_state\": \"powered_on\", \"created\": \"2018-02-01T08:00:00.000000Z:00\", \"healthy\": true }, \"name\": \"foobar\", \"ip_address\": \"192.168.2.1\"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled", + "title": "Enabled value", + "type": "string", + "x-nullable": true + }, + "enabled_var": { + "default": "", + "description": "Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as \"foo.bar\", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get(\"foo\", {}).get(\"bar\", default)", + "title": "Enabled var", + "type": "string", + "x-nullable": true + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "host_filter": { + "default": "", + "description": "This field is deprecated and will be removed in a future release. 
Regex where only matching hosts will be imported.", + "title": "Host filter", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory": { + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "last_job_failed": { + "readOnly": true, + "title": "Last job failed", + "type": "boolean" + }, + "last_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Last job run", + "type": "string", + "x-nullable": true + }, + "last_update_failed": { + "readOnly": true, + "title": "Last update failed", + "type": "boolean" + }, + "last_updated": { + "format": "date-time", + "readOnly": true, + "title": "Last updated", + "type": "string" + }, + "limit": { + "default": "", + "description": "Enter host, group or pattern match", + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "next_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Next job run", + "type": "string", + "x-nullable": true + }, + "overwrite": { + "default": false, + "description": "Overwrite local groups and hosts from remote inventory source.", + "title": "Overwrite", + "type": "boolean", + "x-nullable": true + }, + "overwrite_vars": { + "default": false, + "description": "Overwrite local variables from remote inventory source.", + "title": "Overwrite vars", + "type": "boolean", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "default": "", + "description": "Inventory source SCM branch. Project default used if blank.
Only allowed if project allow_override field is set to true.", + "maxLength": 1024, + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "source": { + "enum": [ + "file", + "constructed", + "scm", + "ec2", + "gce", + "azure_rm", + "vmware", + "satellite6", + "openstack", + "rhv", + "controller", + "insights" + ], + "title": "Source", + "type": "string", + "x-nullable": true + }, + "source_path": { + "default": "", + "maxLength": 1024, + "title": "Source path", + "type": "string", + "x-nullable": true + }, + "source_project": { + "description": "Project containing inventory file used as source.", + "title": "Source project", + "type": "string", + "x-nullable": true + }, + "source_vars": { + "default": "", + "description": "Inventory source variables in YAML or JSON format.", + "title": "Source vars", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled", + "never updated", + "none" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "default": 0, + "description": "The amount of time (in seconds) to run before the task is canceled.", + "title": "Timeout", + "type": "integer" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "update_cache_timeout": { + "default": 0, + "minimum": 0, + "title": "Update cache timeout", + "type": "integer" + }, + "update_on_launch": { + "default": false, + "title": "Update on launch", + "type": "boolean", + "x-nullable": true + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "verbosity": { + "default": 1, + "enum": [ + 0, + 1, + 2 + ], + "title": "Verbosity", + "type": "integer" + } + }, + "required": [ + "name", + "inventory" + ], + "type": "object" + }, + "InventorySourceUpdate": { + "properties": { + "can_update": { + "readOnly": true, + "title": "Can update", + "type": "boolean" + } + }, + "type": "object" + }, + "InventoryUpdateCancel": { + "properties": { + "can_cancel": { + "readOnly": true, + "title": "Can cancel", + "type": "boolean" + } + }, + "type": "object" + }, + "InventoryUpdateDetail": { + "properties": { + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "controller_node": { + "description": "The instance that managed the execution environment.", + "minLength": 1, + "readOnly": true, + "title": "Controller node", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential": { + "description": "Cloud credential to use for inventory updates.", + "minimum": 1, + "title": "Credential", + "type": "integer", + "x-nullable": true + }, + "custom_virtualenv": { + "readOnly": true, + "title": "Custom virtualenv", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "enabled_value": { + "default": "", + "description": "Only used when enabled_var is set. Value when the host is considered enabled.
For example if enabled_var=\"status.power_state\" and enabled_value=\"powered_on\" with host variables:{ \"status\": { \"power_state\": \"powered_on\", \"created\": \"2018-02-01T08:00:00.000000Z:00\", \"healthy\": true }, \"name\": \"foobar\", \"ip_address\": \"192.168.2.1\"}, the host would be marked enabled. If power_state were any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled.", + "title": "Enabled value", + "type": "string", + "x-nullable": true + }, + "enabled_var": { + "default": "", + "description": "Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as \"foo.bar\", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get(\"foo\", {}).get(\"bar\", default)", + "title": "Enabled var", + "type": "string", + "x-nullable": true + }, + "event_processing_finished": { + "description": "Indicates whether all of the events generated by this unified job have been saved to the database.", + "readOnly": true, + "title": "Event processing finished", + "type": "boolean" + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "execution_node": { + "description": "The node the job executed on.", + "minLength": 1, + "readOnly": true, + "title": "Execution node", + "type": "string" + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "host_filter": { + "default": "", + "description": "This field is deprecated and will be removed in a future release.
Regex where only matching hosts will be imported.", + "title": "Host filter", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "instance_group": { + "description": "The Instance group the job was run under", + "title": "Instance group", + "type": "integer", + "x-nullable": true + }, + "inventory": { + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "inventory_source": { + "readOnly": true, + "title": "Inventory source", + "type": "string" + }, + "job_args": { + "minLength": 1, + "readOnly": true, + "title": "Job args", + "type": "string" + }, + "job_cwd": { + "minLength": 1, + "readOnly": true, + "title": "Job cwd", + "type": "string" + }, + "job_env": { + "readOnly": true, + "title": "job_env", + "type": "object" + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "license_error": { + "readOnly": true, + "title": "License error", + "type": "boolean" + }, + "limit": { + "default": "", + "description": "Enter host, group or pattern match", + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "org_host_limit_error": { + "readOnly": true, + "title": "Org host limit error", + "type": "boolean" + }, + "overwrite": { + "default": false, + "description": "Overwrite local groups and hosts from remote inventory source.", + "title": "Overwrite", + "type": "boolean", + "x-nullable": true + }, + "overwrite_vars": { + "default": false, + "description": "Overwrite local variables from remote inventory source.", + "title": "Overwrite vars", + "type": "boolean", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "result_traceback": { + "minLength": 1, + "readOnly": true, + "title": "Result traceback", + "type": "string" + }, + "scm_branch": { + "default": "", + "description": "Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true.", + "maxLength": 1024, + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "scm_revision": { + "description": "The SCM Revision from the Project used for this inventory update.
Only applicable to inventories sourced from scm", + "minLength": 1, + "readOnly": true, + "title": "SCM Revision", + "type": "string" + }, + "source": { + "enum": [ + "file", + "constructed", + "scm", + "ec2", + "gce", + "azure_rm", + "vmware", + "satellite6", + "openstack", + "rhv", + "controller", + "insights" + ], + "title": "Source", + "type": "string", + "x-nullable": true + }, + "source_path": { + "default": "", + "maxLength": 1024, + "title": "Source path", + "type": "string", + "x-nullable": true + }, + "source_project": { + "description": "The project used for this job.", + "readOnly": true, + "title": "Source project", + "type": "string" + }, + "source_project_update": { + "description": "Inventory files from this Project Update were used for the inventory update.", + "title": "Source project update", + "type": "string", + "x-nullable": true + }, + "source_vars": { + "default": "", + "description": "Inventory source variables in YAML or JSON format.", + "title": "Source vars", + "type": "string", + "x-nullable": true + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "default": 0, + "description": "The amount of time (in seconds) to run before the task is canceled.", + "title": "Timeout", + "type": "integer" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "verbosity": { + "default": 1, + "enum": [ + 0, + 1, + 2 + ], + "title": "Verbosity", + "type": "integer" + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "InventoryUpdateEvent": { + "properties": { + "changed": { + "readOnly": true, + "title": "Changed", + "type": "string" + }, + "counter": { + "minimum": 0, + "readOnly": true, + "title": "Counter", + "type": "integer" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "end_line": { + "minimum": 0, + "readOnly": true, + "title": "End line", + "type": "integer" + }, + "event": { + "readOnly": true, + "title": "Event", + "type": "string" + }, + "event_data": { + "default": {}, + "title": "Event data", + "type": "object" + }, + "event_display": { + "minLength": 1, + "readOnly": true, + "title": "Event display", + "type": "string" + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory_update": { + "readOnly": true, + "title": "Inventory update", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "start_line": { + "minimum": 0, + "readOnly": true, + "title": "Start line", + "type": "integer" + }, + "stdout": { +
"minLength": 1, + "readOnly": true, + "title": "Stdout", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "uuid": { + "minLength": 1, + "readOnly": true, + "title": "Uuid", + "type": "string" + }, + "verbosity": { + "minimum": 0, + "readOnly": true, + "title": "Verbosity", + "type": "integer" + } + }, + "type": "object" + }, + "InventoryUpdateList": { + "properties": { + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "controller_node": { + "description": "The instance that managed the execution environment.", + "minLength": 1, + "readOnly": true, + "title": "Controller node", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential": { + "description": "Cloud credential to use for inventory updates.", + "minimum": 1, + "title": "Credential", + "type": "integer", + "x-nullable": true + }, + "custom_virtualenv": { + "readOnly": true, + "title": "Custom virtualenv", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "enabled_value": { + "default": "", + "description": "Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var=\"status.power_state\"and enabled_value=\"powered_on\" with host variables:{ \"status\": { \"power_state\": \"powered_on\", \"created\": \"2018-02-01T08:00:00.000000Z:00\", \"healthy\": true }, \"name\": \"foobar\", \"ip_address\": \"192.168.2.1\"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled", + "title": "Enabled value", + "type": "string", + "x-nullable": true + }, + "enabled_var": { + "default": "", + "description": "Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as \"foo.bar\", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get(\"foo\", {}).get(\"bar\", default)", + "title": "Enabled var", + "type": "string", + "x-nullable": true + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "execution_node": { + "description": "The node the job executed on.", + "minLength": 1, + "readOnly": true, + "title": "Execution node", + "type": "string" + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "host_filter": { + "default": "", + "description": "This field is deprecated and will be removed in a future release. 
Regex where only matching hosts will be imported.", + "title": "Host filter", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "instance_group": { + "description": "The Instance group the job was run under", + "title": "Instance group", + "type": "integer", + "x-nullable": true + }, + "inventory": { + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "inventory_source": { + "readOnly": true, + "title": "Inventory source", + "type": "string" + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "license_error": { + "readOnly": true, + "title": "License error", + "type": "boolean" + }, + "limit": { + "default": "", + "description": "Enter host, group or pattern match", + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "org_host_limit_error": { + "readOnly": true, + "title": "Org host limit error", + "type": "boolean" + }, + "overwrite": { + "default": false, + "description": "Overwrite local groups and hosts from remote inventory source.", + "title": "Overwrite", + "type": "boolean", + "x-nullable": true + }, + "overwrite_vars": { + "default": false, + "description": "Overwrite local variables from remote inventory source.", + "title": "Overwrite vars", + "type": "boolean", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "default": "", + "description": "Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true.", + "maxLength": 1024, + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "scm_revision": { + "description": "The SCM Revision from the Project used for this inventory update. 
Only applicable to inventories source from scm", + "minLength": 1, + "readOnly": true, + "title": "SCM Revision", + "type": "string" + }, + "source": { + "enum": [ + "file", + "constructed", + "scm", + "ec2", + "gce", + "azure_rm", + "vmware", + "satellite6", + "openstack", + "rhv", + "controller", + "insights" + ], + "title": "Source", + "type": "string", + "x-nullable": true + }, + "source_path": { + "default": "", + "maxLength": 1024, + "title": "Source path", + "type": "string", + "x-nullable": true + }, + "source_project_update": { + "description": "Inventory files from this Project Update were used for the inventory update.", + "title": "Source project update", + "type": "string", + "x-nullable": true + }, + "source_vars": { + "default": "", + "description": "Inventory source variables in YAML or JSON format.", + "title": "Source vars", + "type": "string", + "x-nullable": true + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "default": 0, + "description": "The amount of time (in seconds) to run before the task is canceled.", + "title": "Timeout", + "type": "integer" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "verbosity": { + "default": 1, + "enum": [ + 0, + 1, + 2 + ], + "title": "Verbosity", + "type": "integer" + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "InventoryVariableData": { + "properties": { + "variables": { + "default": "", + "description": "Inventory variables in JSON or YAML format.", + "title": "Variables", + "type": "string", + "x-nullable": true + } + }, + "type": "object" + }, + "JobCancel": { + "properties": { + "can_cancel": { + "readOnly": true, + "title": "Can cancel", + "type": "boolean" + } + }, + "type": "object" + }, + "JobCreateSchedule": { + "properties": { + "can_schedule": { + "readOnly": true, + "title": "Can schedule", + "type": "string" + }, + "prompts": { + "readOnly": true, + "title": "Prompts", + "type": "string" + } + }, + "type": "object" + }, + "JobDetail": { + "properties": { + "allow_simultaneous": { + "default": false, + "title": "Allow simultaneous", + "type": "boolean", + "x-nullable": true + }, + "artifacts": { + "readOnly": true, + "title": "Artifacts", + "type": "string" + }, + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "controller_node": { + "description": "The instance that managed the execution environment.", + "minLength": 1, + "readOnly": true, + "title": "Controller node", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + 
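For example, a client might drive the JobCancel schema above as follows. This is a minimal sketch: the controller URL, bearer token, and job ID are placeholders, and /api/v2/jobs/{id}/cancel/ is the conventional controller route for this resource.

# Read the cancel endpoint first; the response reports "can_cancel".
curl -s -H "Authorization: Bearer $TOKEN" https://controller.example.com/api/v2/jobs/42/cancel/
# If "can_cancel" is true, POST to the same endpoint to request cancellation.
curl -s -X POST -H "Authorization: Bearer $TOKEN" https://controller.example.com/api/v2/jobs/42/cancel/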
"custom_virtualenv": { + "readOnly": true, + "title": "Custom virtualenv", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "diff_mode": { + "default": false, + "description": "If enabled, textual changes made to any templated files on the host are shown in the standard output", + "title": "Diff mode", + "type": "boolean", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "event_processing_finished": { + "description": "Indicates whether all of the events generated by this unified job have been saved to the database.", + "readOnly": true, + "title": "Event processing finished", + "type": "boolean" + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "execution_node": { + "description": "The node the job executed on.", + "minLength": 1, + "readOnly": true, + "title": "Execution node", + "type": "string" + }, + "extra_vars": { + "default": "", + "title": "Extra vars", + "type": "string", + "x-nullable": true + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "force_handlers": { + "default": false, + "title": "Force handlers", + "type": "boolean", + "x-nullable": true + }, + "forks": { + "default": 0, + "minimum": 0, + "title": "Forks", + "type": "integer" + }, + "host_status_counts": { + "description": "Playbook stats from the Ansible playbook_on_stats event.", + "readOnly": true, + "title": "Host status counts", + "type": "object", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "instance_group": { + "description": "The Instance group the job was run under", + "title": "Instance group", + "type": "integer", + "x-nullable": true + }, + "inventory": { + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "job_args": { + "minLength": 1, + "readOnly": true, + "title": "Job args", + "type": "string" + }, + "job_cwd": { + "minLength": 1, + "readOnly": true, + "title": "Job cwd", + "type": "string" + }, + "job_env": { + "readOnly": true, + "title": "job_env", + "type": "object" + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "job_slice_count": { + "default": 1, + "description": "If ran as part of sliced jobs, the total number of slices. If 1, job is not part of a sliced job.", + "minimum": 0, + "title": "Job slice count", + "type": "integer" + }, + "job_slice_number": { + "default": 0, + "description": "If part of a sliced job, the ID of the inventory slice operated on. 
If not part of sliced job, parameter is not used.", + "minimum": 0, + "title": "Job slice number", + "type": "integer" + }, + "job_tags": { + "default": "", + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_template": { + "title": "Job template", + "type": "string", + "x-nullable": true + }, + "job_type": { + "default": "run", + "enum": [ + "run", + "check", + "scan" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "limit": { + "default": "", + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "organization": { + "description": "The organization used to determine access to this unified job.", + "readOnly": true, + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "passwords_needed_to_start": { + "readOnly": true, + "title": "Passwords needed to start", + "type": "string" + }, + "playbook": { + "default": "", + "maxLength": 1024, + "title": "Playbook", + "type": "string", + "x-nullable": true + }, + "playbook_counts": { + "description": "A count of all plays and tasks for the job run.", + "readOnly": true, + "title": "Playbook counts", + "type": "string" + }, + "project": { + "title": "Project", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "result_traceback": { + "minLength": 1, + "readOnly": true, + "title": "Result traceback", + "type": "string" + }, + "scm_branch": { + "default": "", + "description": "Branch to use in job run. Project default used if blank. 
Only allowed if project allow_override field is set to true.", + "maxLength": 1024, + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "scm_revision": { + "description": "The SCM Revision from the Project used for this job, if available", + "minLength": 1, + "readOnly": true, + "title": "SCM Revision", + "type": "string" + }, + "skip_tags": { + "default": "", + "maxLength": 1024, + "title": "Skip tags", + "type": "string", + "x-nullable": true + }, + "start_at_task": { + "default": "", + "maxLength": 1024, + "title": "Start at task", + "type": "string", + "x-nullable": true + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "default": 0, + "description": "The amount of time (in seconds) to run before the task is canceled.", + "title": "Timeout", + "type": "integer" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "use_fact_cache": { + "default": false, + "description": "If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible.", + "title": "Use fact cache", + "type": "boolean", + "x-nullable": true + }, + "verbosity": { + "default": 0, + "enum": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "title": "Verbosity", + "type": "integer" + }, + "webhook_credential": { + "description": "Personal Access Token for posting back the status to the service API", + "title": "Webhook credential", + "type": "integer", + "x-nullable": true + }, + "webhook_guid": { + "description": "Unique identifier of the event that triggered this webhook", + "maxLength": 128, + "title": "Webhook guid", + "type": "string", + "x-nullable": true + }, + "webhook_service": { + "description": "Service that webhook requests will be accepted from", + "enum": [ + "github", + "gitlab" + ], + "title": "Webhook service", + "type": "string", + "x-nullable": true + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "JobEvent": { + "properties": { + "changed": { + "readOnly": true, + "title": "Changed", + "type": "boolean" + }, + "counter": { + "minimum": 0, + "readOnly": true, + "title": "Counter", + "type": "integer" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "end_line": { + "minimum": 0, + "readOnly": true, + "title": "End line", + "type": "integer" + }, + "event": { + "enum": [ + "runner_on_failed", + "runner_on_start", + "runner_on_ok", + "runner_on_error", + "runner_on_skipped", + "runner_on_unreachable", + "runner_on_no_hosts", + "runner_on_async_poll", + "runner_on_async_ok", + "runner_on_async_failed", + "runner_item_on_ok", + "runner_item_on_failed", + "runner_item_on_skipped", +
"runner_retry", + "runner_on_file_diff", + "playbook_on_start", + "playbook_on_notify", + "playbook_on_include", + "playbook_on_no_hosts_matched", + "playbook_on_no_hosts_remaining", + "playbook_on_task_start", + "playbook_on_vars_prompt", + "playbook_on_setup", + "playbook_on_import_for_host", + "playbook_on_not_import_for_host", + "playbook_on_play_start", + "playbook_on_stats", + "debug", + "verbose", + "deprecated", + "warning", + "system_warning", + "error" + ], + "title": "Event", + "type": "string", + "x-nullable": true + }, + "event_data": { + "default": {}, + "title": "Event data", + "type": "object" + }, + "event_display": { + "minLength": 1, + "readOnly": true, + "title": "Event display", + "type": "string" + }, + "event_level": { + "readOnly": true, + "title": "Event level", + "type": "integer" + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "host": { + "readOnly": true, + "title": "Host", + "type": "integer", + "x-nullable": true + }, + "host_name": { + "minLength": 1, + "readOnly": true, + "title": "Host name", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "job": { + "readOnly": true, + "title": "Job", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "parent_uuid": { + "minLength": 1, + "readOnly": true, + "title": "Parent uuid", + "type": "string" + }, + "play": { + "minLength": 1, + "readOnly": true, + "title": "Play", + "type": "string" + }, + "playbook": { + "minLength": 1, + "readOnly": true, + "title": "Playbook", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "role": { + "minLength": 1, + "readOnly": true, + "title": "Role", + "type": "string" + }, + "start_line": { + "minimum": 0, + "readOnly": true, + "title": "Start line", + "type": "integer" + }, + "stdout": { + "minLength": 1, + "readOnly": true, + "title": "Stdout", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "task": { + "minLength": 1, + "readOnly": true, + "title": "Task", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "uuid": { + "minLength": 1, + "readOnly": true, + "title": "Uuid", + "type": "string" + }, + "verbosity": { + "minimum": 0, + "readOnly": true, + "title": "Verbosity", + "type": "integer" + } + }, + "required": [ + "event" + ], + "type": "object" + }, + "JobHostSummary": { + "properties": { + "changed": { + "minimum": 0, + "readOnly": true, + "title": "Changed", + "type": "integer" + }, + "constructed_host": { + "description": "Only for jobs run against constructed inventories, this links to the host inside the constructed inventory.", + "readOnly": true, + "title": "Constructed host", + "type": "integer", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "dark": { + "minimum": 0, + "readOnly": true, + "title": "Dark", + "type": "integer" + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "failures": { + "minimum": 0, + "readOnly": true, + "title": "Failures", + "type": "integer" + }, + "host": { + "readOnly": true, + "title": "Host", + "type": "integer", + "x-nullable": true + }, + "host_name": { + "default": "", + "minLength": 1, + "readOnly": true, + "title": "Host name",
+ "type": "string" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "ignored": { + "minimum": 0, + "readOnly": true, + "title": "Ignored", + "type": "integer" + }, + "job": { + "readOnly": true, + "title": "Job", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "ok": { + "minimum": 0, + "readOnly": true, + "title": "Ok", + "type": "integer" + }, + "processed": { + "minimum": 0, + "readOnly": true, + "title": "Processed", + "type": "integer" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "rescued": { + "minimum": 0, + "readOnly": true, + "title": "Rescued", + "type": "integer" + }, + "skipped": { + "minimum": 0, + "readOnly": true, + "title": "Skipped", + "type": "integer" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "type": "object" + }, + "JobLaunch": { + "properties": { + "ask_credential_on_launch": { + "readOnly": true, + "title": "Ask credential on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_diff_mode_on_launch": { + "readOnly": true, + "title": "Ask diff mode on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_execution_environment_on_launch": { + "readOnly": true, + "title": "Ask execution environment on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_forks_on_launch": { + "readOnly": true, + "title": "Ask forks on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_instance_groups_on_launch": { + "readOnly": true, + "title": "Ask instance groups on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_inventory_on_launch": { + "readOnly": true, + "title": "Ask inventory on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_job_slice_count_on_launch": { + "readOnly": true, + "title": "Ask job slice count on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_job_type_on_launch": { + "readOnly": true, + "title": "Ask job type on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_labels_on_launch": { + "readOnly": true, + "title": "Ask labels on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_limit_on_launch": { + "readOnly": true, + "title": "Ask limit on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_scm_branch_on_launch": { + "readOnly": true, + "title": "Ask scm branch on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_skip_tags_on_launch": { + "readOnly": true, + "title": "Ask skip tags on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_tags_on_launch": { + "readOnly": true, + "title": "Ask tags on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_timeout_on_launch": { + "readOnly": true, + "title": "Ask timeout on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_variables_on_launch": { + "readOnly": true, + "title": "Ask variables on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_verbosity_on_launch": { + "readOnly": true, + "title": "Ask verbosity on launch", + "type": "boolean", + "x-nullable": true + }, + "can_start_without_user_input": { + "readOnly": true, + "title": "Can start without user input", + "type": "boolean" + }, + "credential_needed_to_start": { + "readOnly": true, + "title": "Credential needed to start", + "type": "string" +
}, + "credential_passwords": { + "title": "Credential passwords", + "type": "string" + }, + "credentials": { + "items": { + "type": "integer" + }, + "type": "array", + "uniqueItems": true + }, + "defaults": { + "readOnly": true, + "title": "Defaults", + "type": "string" + }, + "diff_mode": { + "title": "Diff mode", + "type": "boolean" + }, + "execution_environment": { + "title": "Execution environment", + "type": "integer" + }, + "extra_vars": { + "title": "Extra vars", + "type": "object" + }, + "forks": { + "minimum": 0, + "title": "Forks", + "type": "integer" + }, + "instance_groups": { + "items": { + "type": "integer" + }, + "type": "array", + "uniqueItems": true + }, + "inventory": { + "title": "Inventory", + "type": "integer" + }, + "inventory_needed_to_start": { + "readOnly": true, + "title": "Inventory needed to start", + "type": "string" + }, + "job_slice_count": { + "minimum": 0, + "title": "Job slice count", + "type": "integer" + }, + "job_tags": { + "title": "Job tags", + "type": "string" + }, + "job_template_data": { + "readOnly": true, + "title": "Job template data", + "type": "string" + }, + "job_type": { + "enum": [ + "run", + "check" + ], + "title": "Job type", + "type": "string" + }, + "labels": { + "items": { + "type": "integer" + }, + "type": "array", + "uniqueItems": true + }, + "limit": { + "title": "Limit", + "type": "string" + }, + "passwords_needed_to_start": { + "readOnly": true, + "title": "Passwords needed to start", + "type": "string" + }, + "scm_branch": { + "title": "Scm branch", + "type": "string" + }, + "skip_tags": { + "title": "Skip tags", + "type": "string" + }, + "survey_enabled": { + "readOnly": true, + "title": "Survey enabled", + "type": "string" + }, + "timeout": { + "title": "Timeout", + "type": "integer" + }, + "variables_needed_to_start": { + "readOnly": true, + "title": "Variables needed to start", + "type": "string" + }, + "verbosity": { + "enum": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "title": "Verbosity", + "type": "integer" + } + }, + "type": "object" + }, + "JobList": { + "properties": { + "allow_simultaneous": { + "default": false, + "title": "Allow simultaneous", + "type": "boolean", + "x-nullable": true + }, + "artifacts": { + "readOnly": true, + "title": "Artifacts", + "type": "string" + }, + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "controller_node": { + "description": "The instance that managed the execution environment.", + "minLength": 1, + "readOnly": true, + "title": "Controller node", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "diff_mode": { + "default": false, + "description": "If enabled, textual changes made to any templated files on the host are shown in the standard output", + "title": "Diff mode", + "type": "boolean", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "execution_node": { + "description": "The node the job executed on.", + "minLength": 1, + "readOnly": true, +
"title": "Execution node", + "type": "string" + }, + "extra_vars": { + "default": "", + "title": "Extra vars", + "type": "string", + "x-nullable": true + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "force_handlers": { + "default": false, + "title": "Force handlers", + "type": "boolean", + "x-nullable": true + }, + "forks": { + "default": 0, + "minimum": 0, + "title": "Forks", + "type": "integer" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "instance_group": { + "description": "The Instance group the job was run under", + "title": "Instance group", + "type": "integer", + "x-nullable": true + }, + "inventory": { + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "job_slice_count": { + "default": 1, + "description": "If ran as part of sliced jobs, the total number of slices. If 1, job is not part of a sliced job.", + "minimum": 0, + "title": "Job slice count", + "type": "integer" + }, + "job_slice_number": { + "default": 0, + "description": "If part of a sliced job, the ID of the inventory slice operated on. If not part of sliced job, parameter is not used.", + "minimum": 0, + "title": "Job slice number", + "type": "integer" + }, + "job_tags": { + "default": "", + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_template": { + "title": "Job template", + "type": "string", + "x-nullable": true + }, + "job_type": { + "default": "run", + "enum": [ + "run", + "check", + "scan" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "limit": { + "default": "", + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "organization": { + "description": "The organization used to determine access to this unified job.", + "readOnly": true, + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "passwords_needed_to_start": { + "readOnly": true, + "title": "Passwords needed to start", + "type": "string" + }, + "playbook": { + "default": "", + "maxLength": 1024, + "title": "Playbook", + "type": "string", + "x-nullable": true + }, + "project": { + "title": "Project", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "default": "", + "description": "Branch to use in job run. Project default used if blank. 
Only allowed if project allow_override field is set to true.", + "maxLength": 1024, + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "scm_revision": { + "description": "The SCM Revision from the Project used for this job, if available", + "minLength": 1, + "readOnly": true, + "title": "SCM Revision", + "type": "string" + }, + "skip_tags": { + "default": "", + "maxLength": 1024, + "title": "Skip tags", + "type": "string", + "x-nullable": true + }, + "start_at_task": { + "default": "", + "maxLength": 1024, + "title": "Start at task", + "type": "string", + "x-nullable": true + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "default": 0, + "description": "The amount of time (in seconds) to run before the task is canceled.", + "title": "Timeout", + "type": "integer" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "use_fact_cache": { + "default": false, + "description": "If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible.", + "title": "Use fact cache", + "type": "boolean", + "x-nullable": true + }, + "verbosity": { + "default": 0, + "enum": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "title": "Verbosity", + "type": "integer" + }, + "webhook_credential": { + "description": "Personal Access Token for posting back the status to the service API", + "title": "Webhook credential", + "type": "integer", + "x-nullable": true + }, + "webhook_guid": { + "description": "Unique identifier of the event that triggered this webhook", + "maxLength": 128, + "title": "Webhook guid", + "type": "string", + "x-nullable": true + }, + "webhook_service": { + "description": "Service that webhook requests will be accepted from", + "enum": [ + "github", + "gitlab" + ], + "title": "Webhook service", + "type": "string", + "x-nullable": true + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "JobRelaunch": { + "properties": { + "credential_passwords": { + "title": "Credential passwords", + "type": "string" + }, + "hosts": { + "default": "all", + "enum": [ + "all", + "failed" + ], + "title": "Hosts", + "type": "string", + "x-nullable": true + }, + "passwords_needed_to_start": { + "readOnly": true, + "title": "Passwords needed to start", + "type": "string" + }, + "retry_counts": { + "readOnly": true, + "title": "Retry counts", + "type": "string" + } + }, + "required": [ + "credential_passwords" + ], + "type": "object" + }, + "JobTemplate": { + "properties": { + "allow_simultaneous": { + "default": false, + "title": "Allow simultaneous", + "type": "boolean", + "x-nullable": true + }, + "ask_credential_on_launch": { + 
"default": false, + "title": "Ask credential on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_diff_mode_on_launch": { + "default": false, + "title": "Ask diff mode on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_execution_environment_on_launch": { + "default": false, + "title": "Ask execution environment on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_forks_on_launch": { + "default": false, + "title": "Ask forks on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_instance_groups_on_launch": { + "default": false, + "title": "Ask instance groups on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_inventory_on_launch": { + "default": false, + "title": "Ask inventory on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_job_slice_count_on_launch": { + "default": false, + "title": "Ask job slice count on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_job_type_on_launch": { + "default": false, + "title": "Ask job type on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_labels_on_launch": { + "default": false, + "title": "Ask labels on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_limit_on_launch": { + "default": false, + "title": "Ask limit on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_scm_branch_on_launch": { + "default": false, + "title": "Ask scm branch on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_skip_tags_on_launch": { + "default": false, + "title": "Ask skip tags on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_tags_on_launch": { + "default": false, + "title": "Ask tags on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_timeout_on_launch": { + "default": false, + "title": "Ask timeout on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_variables_on_launch": { + "default": false, + "title": "Ask variables on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_verbosity_on_launch": { + "default": false, + "title": "Ask verbosity on launch", + "type": "boolean", + "x-nullable": true + }, + "become_enabled": { + "default": false, + "title": "Become enabled", + "type": "boolean", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "custom_virtualenv": { + "description": "Local absolute file path containing a custom Python virtualenv to use", + "minLength": 1, + "readOnly": true, + "title": "Custom virtualenv", + "type": "string", + "x-nullable": true + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "diff_mode": { + "default": false, + "description": "If enabled, textual changes made to any templated files on the host are shown in the standard output", + "title": "Diff mode", + "type": "boolean", + "x-nullable": true + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "extra_vars": { + "default": "", + "title": "Extra vars", + "type": "string", + "x-nullable": true + }, + "force_handlers": { + "default": false, + "title": "Force handlers", + "type": "boolean", + "x-nullable": true + }, + "forks": { + "default": 0, + "minimum": 0, + "title": "Forks", + "type": "integer" + }, + "host_config_key": { + "default": "", + "maxLength": 1024, + "title": "Host config key", + "type": "string", + "x-nullable": true + }, + 
"id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory": { + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "job_slice_count": { + "default": 1, + "description": "The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1.", + "minimum": 0, + "title": "Job slice count", + "type": "integer" + }, + "job_tags": { + "default": "", + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_type": { + "default": "run", + "enum": [ + "run", + "check" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "last_job_failed": { + "readOnly": true, + "title": "Last job failed", + "type": "boolean" + }, + "last_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Last job run", + "type": "string", + "x-nullable": true + }, + "limit": { + "default": "", + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "next_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Next job run", + "type": "string", + "x-nullable": true + }, + "organization": { + "description": "The organization used to determine access to this template.", + "readOnly": true, + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "playbook": { + "default": "", + "maxLength": 1024, + "title": "Playbook", + "type": "string", + "x-nullable": true + }, + "prevent_instance_group_fallback": { + "default": false, + "description": "If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instances groups to run on.If this setting is enabled and you provided an empty list, the global instance groups will be applied.", + "title": "Prevent instance group fallback", + "type": "boolean", + "x-nullable": true + }, + "project": { + "title": "Project", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "default": "", + "description": "Branch to use in job run. Project default used if blank. 
Only allowed if project allow_override field is set to true.", + "maxLength": 1024, + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "skip_tags": { + "default": "", + "maxLength": 1024, + "title": "Skip tags", + "type": "string", + "x-nullable": true + }, + "start_at_task": { + "default": "", + "maxLength": 1024, + "title": "Start at task", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled", + "never updated" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "survey_enabled": { + "default": false, + "title": "Survey enabled", + "type": "boolean", + "x-nullable": true + }, + "timeout": { + "default": 0, + "description": "The amount of time (in seconds) to run before the task is canceled.", + "title": "Timeout", + "type": "integer" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "use_fact_cache": { + "default": false, + "description": "If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible.", + "title": "Use fact cache", + "type": "boolean", + "x-nullable": true + }, + "verbosity": { + "default": 0, + "enum": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "title": "Verbosity", + "type": "integer" + }, + "webhook_credential": { + "description": "Personal Access Token for posting back the status to the service API", + "title": "Webhook credential", + "type": "integer", + "x-nullable": true + }, + "webhook_service": { + "description": "Service that webhook requests will be accepted from", + "enum": [ + "github", + "gitlab" + ], + "title": "Webhook service", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "Label": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "organization": { + "description": "Organization this label belongs to.", + "title": "Organization", + "type": "integer" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name", + "organization" + ], + "type": "object" + }, + "Notification": { + "properties": { + "body": { + "description": "Notification body", + "readOnly": true, + "title": "Body", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "error": { + "minLength": 1, + "readOnly": true, + "title": "Error", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "notification_template": { + "readOnly": true, + "title": "Notification template", + "type": "integer" + }, + 
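[Editor's note: the Label schema just defined requires both name and organization. As an illustrative sketch only (the endpoint paths and IDs below are assumptions following the conventional controller API layout, not taken from this excerpt), a label can be created and then associated with a job template through the template's labels sublist:]

    import requests

    BASE = "https://controller.example.com"  # assumed
    HEADERS = {"Authorization": "Bearer TOKEN"}  # assumed

    # Create a label; "name" and "organization" are the required fields.
    label = requests.post(
        f"{BASE}/api/v2/labels/",
        headers=HEADERS,
        json={"name": "nightly", "organization": 1},
    ).json()

    # Associate it with job template 42 (conventional sublist pattern,
    # assumed here rather than shown in this excerpt).
    requests.post(
        f"{BASE}/api/v2/job_templates/42/labels/",
        headers=HEADERS,
        json={"id": label["id"]},
    )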
"notification_type": { + "enum": [ + "email", + "grafana", + "irc", + "mattermost", + "pagerduty", + "rocketchat", + "slack", + "twilio", + "webhook" + ], + "title": "Notification type", + "type": "string", + "x-nullable": true + }, + "notifications_sent": { + "readOnly": true, + "title": "Notifications sent", + "type": "integer" + }, + "recipients": { + "minLength": 1, + "readOnly": true, + "title": "Recipients", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "status": { + "enum": [ + "pending", + "successful", + "failed" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "subject": { + "minLength": 1, + "readOnly": true, + "title": "Subject", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "notification_type" + ], + "type": "object" + }, + "NotificationTemplate": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "messages": { + "default": { + "error": null, + "started": null, + "success": null, + "workflow_approval": null + }, + "description": "Optional custom messages for notification template.", + "title": "Messages", + "type": "object", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "notification_configuration": { + "default": {}, + "title": "Notification configuration", + "type": "object" + }, + "notification_type": { + "enum": [ + "email", + "grafana", + "irc", + "mattermost", + "pagerduty", + "rocketchat", + "slack", + "twilio", + "webhook" + ], + "title": "Notification type", + "type": "string", + "x-nullable": true + }, + "organization": { + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name", + "organization", + "notification_type" + ], + "type": "object" + }, + "OAuth2Application": { + "properties": { + "authorization_grant_type": { + "description": "The Grant type the user must use for acquire tokens for this application.", + "enum": [ + "authorization-code", + "password" + ], + "title": "Authorization Grant Type", + "type": "string", + "x-nullable": true + }, + "client_id": { + "minLength": 1, + "readOnly": true, + "title": "Client id", + "type": "string", + "x-nullable": true + }, + "client_secret": { + "description": "Used for more stringent verification of access to an application when creating a token.", + "minLength": 1, + "readOnly": true, + "title": "Client Secret", + "type": "string", + "x-nullable": true + }, + "client_type": { + "description": "Set to Public or Confidential depending on how secure the client device is.", + "enum": [ + "confidential", + "public" + ], + "title": 
"Client Type", + "type": "string", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "Id", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 255, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "organization": { + "description": "Organization containing this application.", + "title": "Organization", + "type": "integer" + }, + "redirect_uris": { + "description": "Allowed URIs list, space separated", + "title": "Redirect URIs", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "skip_authorization": { + "default": false, + "description": "Set True to skip authorization step for completely trusted applications.", + "title": "Skip Authorization", + "type": "boolean", + "x-nullable": true + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name", + "client_type", + "authorization_grant_type", + "organization" + ], + "type": "object" + }, + "OAuth2Token": { + "properties": { + "application": { + "title": "Application", + "type": "integer", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "expires": { + "format": "date-time", + "readOnly": true, + "title": "Expires", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "Id", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "refresh_token": { + "readOnly": true, + "title": "Refresh token", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scope": { + "default": "write", + "description": "Allowed scopes, further restricts user's permissions. 
Must be a simple space-separated string with allowed scopes ['read', 'write'].", + "title": "Scope", + "type": "string", + "x-nullable": true + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "token": { + "readOnly": true, + "title": "Token", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "user": { + "description": "The user representing the token owner", + "readOnly": true, + "title": "User", + "type": "integer" + } + }, + "type": "object" + }, + "OAuth2TokenDetail": { + "properties": { + "application": { + "readOnly": true, + "title": "Application", + "type": "integer", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "expires": { + "format": "date-time", + "readOnly": true, + "title": "Expires", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "Id", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "refresh_token": { + "readOnly": true, + "title": "Refresh token", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scope": { + "default": "write", + "description": "Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write'].", + "title": "Scope", + "type": "string", + "x-nullable": true + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "token": { + "readOnly": true, + "title": "Token", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "user": { + "description": "The user representing the token owner", + "readOnly": true, + "title": "User", + "type": "integer" + } + }, + "type": "object" + }, + "Organization": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "custom_virtualenv": { + "description": "Local absolute file path containing a custom Python virtualenv to use", + "minLength": 1, + "readOnly": true, + "title": "Custom virtualenv", + "type": "string", + "x-nullable": true + }, + "default_environment": { + "description": "The default execution environment for jobs run by this organization.", + "title": "Default environment", + "type": "integer", + "x-nullable": true + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "max_hosts": { + "default": 0, + "description": "Maximum number of hosts allowed to be managed by this organization.", + "minimum": 0, + "title": "Max hosts", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", 
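[Editor's note: per the OAuth2Application and OAuth2Token schemas above, client_id, client_secret, token, and refresh_token are readOnly, so clients only ever receive them from the API's own responses. An illustrative sketch, assuming conventional /api/v2/applications/ and /api/v2/tokens/ endpoints and basic-auth bootstrap credentials:]

    import requests

    BASE = "https://controller.example.com"  # assumed
    AUTH = ("admin", "password")  # assumed bootstrap credentials

    # Create a confidential, password-grant application; the payload
    # carries exactly the schema's required fields.
    app = requests.post(
        f"{BASE}/api/v2/applications/",
        auth=AUTH,
        json={
            "name": "automation-cli",
            "client_type": "confidential",
            "authorization_grant_type": "password",
            "organization": 1,
        },
    ).json()

    # Create a read-scoped token; "token" and "refresh_token" are
    # readOnly fields populated in the creation response.
    tok = requests.post(f"{BASE}/api/v2/tokens/", auth=AUTH,
                        json={"application": app["id"], "scope": "read"})
    print(tok.json()["token"])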
+ "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "OrganizationCredentialSerializerCreate": { + "properties": { + "cloud": { + "readOnly": true, + "title": "Cloud", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential_type": { + "description": "Specify the type of credential you want to create. Refer to the documentation for details on each type.", + "title": "Credential Type", + "type": "integer" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inputs": { + "default": {}, + "description": "Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax.", + "title": "Inputs", + "type": "object" + }, + "kind": { + "readOnly": true, + "title": "Kind", + "type": "string" + }, + "kubernetes": { + "readOnly": true, + "title": "Kubernetes", + "type": "string" + }, + "managed": { + "readOnly": true, + "title": "Managed", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "organization": { + "description": "Inherit permissions from organization roles. If provided on creation, do not give either user or team.", + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name", + "credential_type" + ], + "type": "object" + }, + "Project": { + "properties": { + "allow_override": { + "default": false, + "description": "Allow changing the SCM branch or revision in a job template that uses this project.", + "title": "Allow override", + "type": "boolean", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential": { + "title": "Credential", + "type": "integer", + "x-nullable": true + }, + "custom_virtualenv": { + "description": "Local absolute file path containing a custom Python virtualenv to use", + "minLength": 1, + "readOnly": true, + "title": "Custom virtualenv", + "type": "string", + "x-nullable": true + }, + "default_environment": { + "description": "The default execution environment for jobs run using this project.", + "title": "Default environment", + "type": "integer", + "x-nullable": true + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "last_job_failed": { + "readOnly": true, + "title": "Last job failed", + "type": "boolean" + }, + "last_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Last job run", + "type": "string", + "x-nullable": true + }, + "last_update_failed": { + "readOnly": true, + "title": "Last update failed", + "type": "boolean" + }, + "last_updated": { + "format": "date-time", + "readOnly": true, + "title": "Last updated", + "type": "string" + }, + "local_path": { + "description": "Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this 
project.", + "maxLength": 1024, + "title": "Local path", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "next_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Next job run", + "type": "string", + "x-nullable": true + }, + "organization": { + "description": "The organization used to determine access to this template.", + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "default": "", + "description": "Specific branch, tag or commit to checkout.", + "maxLength": 256, + "title": "SCM Branch", + "type": "string", + "x-nullable": true + }, + "scm_clean": { + "default": false, + "description": "Discard any local changes before syncing the project.", + "title": "Scm clean", + "type": "boolean", + "x-nullable": true + }, + "scm_delete_on_update": { + "default": false, + "description": "Delete the project before syncing.", + "title": "Scm delete on update", + "type": "boolean", + "x-nullable": true + }, + "scm_refspec": { + "default": "", + "description": "For git projects, an additional refspec to fetch.", + "maxLength": 1024, + "title": "SCM refspec", + "type": "string", + "x-nullable": true + }, + "scm_revision": { + "description": "The last revision fetched by a project update", + "minLength": 1, + "readOnly": true, + "title": "SCM Revision", + "type": "string" + }, + "scm_track_submodules": { + "default": false, + "description": "Track submodules latest commits on defined branch.", + "title": "Scm track submodules", + "type": "boolean", + "x-nullable": true + }, + "scm_type": { + "default": "", + "description": "Specifies the source control system used to store the project.", + "enum": [ + "", + "git", + "svn", + "insights", + "archive" + ], + "title": "SCM Type", + "type": "string", + "x-nullable": true + }, + "scm_update_cache_timeout": { + "default": 0, + "description": "The number of seconds after the last project update ran that a new project update will be launched as a job dependency.", + "minimum": 0, + "title": "Scm update cache timeout", + "type": "integer" + }, + "scm_update_on_launch": { + "default": false, + "description": "Update the project when a job is launched that uses the project.", + "title": "Scm update on launch", + "type": "boolean", + "x-nullable": true + }, + "scm_url": { + "default": "", + "description": "The location where the project is stored.", + "maxLength": 1024, + "title": "SCM URL", + "type": "string", + "x-nullable": true + }, + "signature_validation_credential": { + "description": "An optional credential used for validating files in the project against unexpected changes.", + "title": "Signature validation credential", + "type": "integer", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled", + "never updated", + "ok", + "missing" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "default": 0, + "description": "The amount of time (in seconds) to run before the task is canceled.", + "title": "Timeout", + "type": "integer" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" 
+ }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "ProjectInventories": { + "properties": { + "inventory_files": { + "description": "Array of inventory files and directories available within this project, not comprehensive.", + "readOnly": true, + "title": "Inventory files", + "type": "string" + } + }, + "type": "object" + }, + "ProjectPlaybooks": { + "properties": { + "playbooks": { + "description": "Array of playbooks available within this project.", + "readOnly": true, + "title": "Playbooks", + "type": "string" + } + }, + "type": "object" + }, + "ProjectUpdateCancel": { + "properties": { + "can_cancel": { + "readOnly": true, + "title": "Can cancel", + "type": "boolean" + } + }, + "type": "object" + }, + "ProjectUpdateDetail": { + "properties": { + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential": { + "title": "Credential", + "type": "integer", + "x-nullable": true + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "event_processing_finished": { + "description": "Indicates whether all of the events generated by this unified job have been saved to the database.", + "readOnly": true, + "title": "Event processing finished", + "type": "boolean" + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "execution_node": { + "description": "The node the job executed on.", + "minLength": 1, + "readOnly": true, + "title": "Execution node", + "type": "string" + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "host_status_counts": { + "description": "Playbook stats from the Ansible playbook_on_stats event.", + "readOnly": true, + "title": "Host status counts", + "type": "object", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "job_args": { + "minLength": 1, + "readOnly": true, + "title": "Job args", + "type": "string" + }, + "job_cwd": { + "minLength": 1, + "readOnly": true, + "title": "Job cwd", + "type": "string" + }, + "job_env": { + "readOnly": true, + "title": "job_env", + "type": "object" + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "job_tags": { + "default": "", + "description": "Parts of the project update playbook that will be run.", + "maxLength": 1024, + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_type": { + "default": "check", + "enum": [ + "run", + "check" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + 
"scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "local_path": { + "description": "Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project.", + "maxLength": 1024, + "title": "Local path", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "playbook_counts": { + "description": "A count of all plays and tasks for the job run.", + "readOnly": true, + "title": "Playbook counts", + "type": "string" + }, + "project": { + "readOnly": true, + "title": "Project", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "result_traceback": { + "minLength": 1, + "readOnly": true, + "title": "Result traceback", + "type": "string" + }, + "scm_branch": { + "default": "", + "description": "Specific branch, tag or commit to checkout.", + "maxLength": 256, + "title": "SCM Branch", + "type": "string", + "x-nullable": true + }, + "scm_clean": { + "default": false, + "description": "Discard any local changes before syncing the project.", + "title": "Scm clean", + "type": "boolean", + "x-nullable": true + }, + "scm_delete_on_update": { + "default": false, + "description": "Delete the project before syncing.", + "title": "Scm delete on update", + "type": "boolean", + "x-nullable": true + }, + "scm_refspec": { + "default": "", + "description": "For git projects, an additional refspec to fetch.", + "maxLength": 1024, + "title": "SCM refspec", + "type": "string", + "x-nullable": true + }, + "scm_revision": { + "description": "The SCM Revision discovered by this update for the given project and branch.", + "minLength": 1, + "readOnly": true, + "title": "SCM Revision", + "type": "string" + }, + "scm_track_submodules": { + "default": false, + "description": "Track submodules latest commits on defined branch.", + "title": "Scm track submodules", + "type": "boolean", + "x-nullable": true + }, + "scm_type": { + "default": "", + "description": "Specifies the source control system used to store the project.", + "enum": [ + "", + "git", + "svn", + "insights", + "archive" + ], + "title": "SCM Type", + "type": "string", + "x-nullable": true + }, + "scm_url": { + "default": "", + "description": "The location where the project is stored.", + "maxLength": 1024, + "title": "SCM URL", + "type": "string", + "x-nullable": true + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "default": 0, + "description": "The amount of time (in seconds) to run before the task is canceled.", + "title": "Timeout", + "type": "integer" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", + "type": "string" + }, + "url": { + 
"readOnly": true, + "title": "Url", + "type": "string" + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "ProjectUpdateEvent": { + "properties": { + "changed": { + "readOnly": true, + "title": "Changed", + "type": "boolean" + }, + "counter": { + "minimum": 0, + "readOnly": true, + "title": "Counter", + "type": "integer" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "end_line": { + "minimum": 0, + "readOnly": true, + "title": "End line", + "type": "integer" + }, + "event": { + "enum": [ + "runner_on_failed", + "runner_on_start", + "runner_on_ok", + "runner_on_error", + "runner_on_skipped", + "runner_on_unreachable", + "runner_on_no_hosts", + "runner_on_async_poll", + "runner_on_async_ok", + "runner_on_async_failed", + "runner_item_on_ok", + "runner_item_on_failed", + "runner_item_on_skipped", + "runner_retry", + "runner_on_file_diff", + "playbook_on_start", + "playbook_on_notify", + "playbook_on_include", + "playbook_on_no_hosts_matched", + "playbook_on_no_hosts_remaining", + "playbook_on_task_start", + "playbook_on_vars_prompt", + "playbook_on_setup", + "playbook_on_import_for_host", + "playbook_on_not_import_for_host", + "playbook_on_play_start", + "playbook_on_stats", + "debug", + "verbose", + "deprecated", + "warning", + "system_warning", + "error" + ], + "title": "Event", + "type": "string", + "x-nullable": true + }, + "event_data": { + "readOnly": true, + "title": "Event data", + "type": "string" + }, + "event_display": { + "minLength": 1, + "readOnly": true, + "title": "Event display", + "type": "string" + }, + "event_level": { + "readOnly": true, + "title": "Event level", + "type": "integer" + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "host_name": { + "readOnly": true, + "title": "Host name", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "play": { + "minLength": 1, + "readOnly": true, + "title": "Play", + "type": "string" + }, + "playbook": { + "minLength": 1, + "readOnly": true, + "title": "Playbook", + "type": "string" + }, + "project_update": { + "readOnly": true, + "title": "Project update", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "role": { + "minLength": 1, + "readOnly": true, + "title": "Role", + "type": "string" + }, + "start_line": { + "minimum": 0, + "readOnly": true, + "title": "Start line", + "type": "integer" + }, + "stdout": { + "readOnly": true, + "title": "Stdout", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "task": { + "minLength": 1, + "readOnly": true, + "title": "Task", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "uuid": { + "minLength": 1, + "readOnly": true, + "title": "Uuid", + "type": "string" + }, + "verbosity": { + "minimum": 0, + "readOnly": true, + "title": "Verbosity", + "type": "integer" + } + }, + "required": [ + "event" + ], + "type": "object" + }, + "ProjectUpdateList": { + "properties": { + "canceled_on": { + "description": "The date and time when the 
cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential": { + "title": "Credential", + "type": "integer", + "x-nullable": true + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "execution_node": { + "description": "The node the job executed on.", + "minLength": 1, + "readOnly": true, + "title": "Execution node", + "type": "string" + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "job_tags": { + "default": "", + "description": "Parts of the project update playbook that will be run.", + "maxLength": 1024, + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_type": { + "default": "check", + "enum": [ + "run", + "check" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "local_path": { + "description": "Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project.", + "maxLength": 1024, + "title": "Local path", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "project": { + "readOnly": true, + "title": "Project", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "default": "", + "description": "Specific branch, tag or commit to checkout.", + "maxLength": 256, + "title": "SCM Branch", + "type": "string", + "x-nullable": true + }, + "scm_clean": { + "default": false, + "description": "Discard any local changes before syncing the project.", + "title": "Scm clean", + "type": "boolean", + "x-nullable": true + }, + "scm_delete_on_update": { + "default": false, + "description": "Delete the project before syncing.", + "title": "Scm delete on update", + "type": "boolean", + "x-nullable": true + }, + "scm_refspec": { + "default": "", + "description": "For git projects, an additional refspec to fetch.", + "maxLength": 1024, + "title": "SCM refspec", + "type": "string", + "x-nullable": true + }, + "scm_revision": { + "description": "The SCM Revision 
discovered by this update for the given project and branch.", + "minLength": 1, + "readOnly": true, + "title": "SCM Revision", + "type": "string" + }, + "scm_track_submodules": { + "default": false, + "description": "Track submodules latest commits on defined branch.", + "title": "Scm track submodules", + "type": "boolean", + "x-nullable": true + }, + "scm_type": { + "default": "", + "description": "Specifies the source control system used to store the project.", + "enum": [ + "", + "git", + "svn", + "insights", + "archive" + ], + "title": "SCM Type", + "type": "string", + "x-nullable": true + }, + "scm_url": { + "default": "", + "description": "The location where the project is stored.", + "maxLength": 1024, + "title": "SCM URL", + "type": "string", + "x-nullable": true + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "default": 0, + "description": "The amount of time (in seconds) to run before the task is canceled.", + "title": "Timeout", + "type": "integer" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "ProjectUpdateView": { + "properties": { + "can_update": { + "readOnly": true, + "title": "Can update", + "type": "boolean" + } + }, + "type": "object" + }, + "ResourceAccessListElement": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "email": { + "maxLength": 254, + "title": "Email address", + "type": "string", + "x-nullable": true + }, + "external_account": { + "description": "Set if the account is managed by an external service", + "readOnly": true, + "title": "External account", + "type": "string" + }, + "first_name": { + "maxLength": 150, + "title": "First name", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "is_superuser": { + "default": false, + "description": "Designates that this user has all permissions without explicitly assigning them.", + "title": "Superuser status", + "type": "boolean", + "x-nullable": true + }, + "is_system_auditor": { + "default": false, + "title": "Is system auditor", + "type": "boolean" + }, + "last_login": { + "format": "date-time", + "readOnly": true, + "title": "Last login", + "type": "string", + "x-nullable": true + }, + "last_name": { + "maxLength": 150, + "title": "Last name", + "type": "string", + "x-nullable": true + }, + "ldap_dn": { + "minLength": 1, + "readOnly": true, + "title": "Ldap dn", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "password": { + "default": "", + "description": "Field used to change the password.", + 
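[Editor's note: ProjectUpdateDetail and ProjectUpdateList above share the same status enum (new, pending, waiting, running, then a terminal state), so a client can poll until a terminal status is reached; the ProjectUpdateCancel schema's can_cancel flag supports aborting while still possible. Illustrative only; the path and ID are assumptions:]

    import time
    import requests

    BASE = "https://controller.example.com"  # assumed
    HEADERS = {"Authorization": "Bearer TOKEN"}  # assumed
    TERMINAL = {"successful", "failed", "error", "canceled"}

    # Poll project update 7 until it reaches a terminal status.
    while True:
        update = requests.get(f"{BASE}/api/v2/project_updates/7/",
                              headers=HEADERS).json()
        if update["status"] in TERMINAL:
            break
        time.sleep(5)
    print(update["status"], update["elapsed"])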
"minLength": 1, + "title": "Password", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "username": { + "description": "Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.", + "maxLength": 150, + "minLength": 1, + "pattern": "^[\\w.@+-]+$", + "title": "Username", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "username" + ], + "type": "object" + }, + "Role": { + "properties": { + "description": { + "readOnly": true, + "title": "Description", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "name": { + "readOnly": true, + "title": "Name", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "type": "object" + }, + "RoleSerializerWithParentAccess": { + "properties": { + "description": { + "readOnly": true, + "title": "Description", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "name": { + "readOnly": true, + "title": "Name", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "type": "object" + }, + "Schedule": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "diff_mode": { + "title": "Diff mode", + "type": "boolean", + "x-nullable": true + }, + "dtend": { + "description": "The last occurrence of the schedule occurs before this time, aftewards the schedule expires.", + "format": "date-time", + "readOnly": true, + "title": "Dtend", + "type": "string", + "x-nullable": true + }, + "dtstart": { + "description": "The first occurrence of the schedule occurs on or after this time.", + "format": "date-time", + "readOnly": true, + "title": "Dtstart", + "type": "string", + "x-nullable": true + }, + "enabled": { + "default": true, + "description": "Enables processing of this schedule.", + "title": "Enabled", + "type": "boolean", + "x-nullable": true + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "extra_data": { + "default": {}, + "title": "Extra data", + "type": "object" + }, + "forks": { + "minimum": 0, + "title": "Forks", + "type": "integer", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory": { + "description": "Inventory applied as a prompt, assuming job template prompts for inventory", + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "job_slice_count": { + "minimum": 0, + "title": "Job slice 
count", + "type": "integer", + "x-nullable": true + }, + "job_tags": { + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_type": { + "enum": [ + "run", + "check" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "limit": { + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "next_run": { + "description": "The next time that the scheduled action will run.", + "format": "date-time", + "readOnly": true, + "title": "Next run", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "rrule": { + "description": "A value representing the schedules iCal recurrence rule.", + "minLength": 1, + "title": "Rrule", + "type": "string", + "x-nullable": true + }, + "scm_branch": { + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "skip_tags": { + "title": "Skip tags", + "type": "string", + "x-nullable": true + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "title": "Timeout", + "type": "integer", + "x-nullable": true + }, + "timezone": { + "description": "The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field.", + "readOnly": true, + "title": "Timezone", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "title": "Unified job template", + "type": "integer" + }, + "until": { + "description": "The date this schedule will end. This field is computed from the RRULE. 
If the schedule does not end, an empty string will be returned", + "readOnly": true, + "title": "Until", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "verbosity": { + "enum": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "title": "Verbosity", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "rrule", + "name", + "unified_job_template" + ], + "type": "object" + }, + "SchedulePreview": { + "properties": { + "rrule": { + "description": "A value representing the schedule's iCal recurrence rule.", + "minLength": 1, + "title": "Rrule", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "rrule" + ], + "type": "object" + }, + "SettingCategory": { + "properties": { + "name": { + "minLength": 1, + "readOnly": true, + "title": "Name", + "type": "string" + }, + "slug": { + "minLength": 1, + "readOnly": true, + "title": "Slug", + "type": "string" + }, + "url": { + "minLength": 1, + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "type": "object" + }, + "SettingSingleton": { + "properties": { + "ACTIVITY_STREAM_ENABLED": { + "default": true, + "description": "Enable capturing activity for the activity stream.", + "title": "Enable Activity Stream", + "type": "boolean" + }, + "ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC": { + "default": false, + "description": "Enable capturing activity for the activity stream when running inventory sync.", + "title": "Enable Activity Stream for Inventory Sync", + "type": "boolean" + }, + "AD_HOC_COMMANDS": { + "default": [ + "command", + "shell", + "yum", + "apt", + "apt_key", + "apt_repository", + "apt_rpm", + "service", + "group", + "user", + "mount", + "ping", + "selinux", + "setup", + "win_ping", + "win_service", + "win_updates", + "win_group", + "win_user" + ], + "description": "List of modules allowed to be used by ad-hoc jobs.", + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "ALLOW_JINJA_IN_EXTRA_VARS": { + "default": "template", + "description": "Ansible allows variable substitution via the Jinja2 templating language for --extra-vars. This poses a potential security risk where users with the ability to specify extra vars at job launch time can use Jinja2 templates to run arbitrary Python. It is recommended that this value be set to \"template\" or \"never\".", + "enum": [ + "always", + "never", + "template" + ], + "title": "When can extra variables contain Jinja templates?", + "type": "string" + }, + "ALLOW_METRICS_FOR_ANONYMOUS_USERS": { + "default": false, + "description": "If true, anonymous users are allowed to poll metrics.", + "title": "Allow anonymous users to poll metrics", + "type": "boolean" + }, + "ALLOW_OAUTH2_FOR_EXTERNAL_USERS": { + "default": false, + "description": "For security reasons, users from external auth providers (LDAP, SAML, SSO, Radius, and others) are not allowed to create OAuth2 tokens. To change this behavior, enable this setting. Existing tokens will not be deleted when this setting is toggled off.", + "title": "Allow External Users to Create OAuth2 Tokens", + "type": "boolean" + }, + "ANSIBLE_FACT_CACHE_TIMEOUT": { + "default": 0, + "description": "Maximum time, in seconds, that stored Ansible facts are considered valid since the last time they were modified. Only valid, non-stale, facts will be accessible by a playbook. Note, this does not influence the deletion of ansible_facts from the database.
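[Editor's note: a Schedule above is driven entirely by its RRULE: rrule, name, and unified_job_template are the required fields, and dtstart, dtend, until, and timezone are all derived from the rule server-side. A sketch; the preview endpoint and the exact RRULE grammar accepted by the service are assumptions, with only the field names taken from the schema:]

    import requests

    BASE = "https://controller.example.com"  # assumed
    HEADERS = {"Authorization": "Bearer TOKEN"}  # assumed

    # An iCal rule: daily at 06:00 UTC, starting 2025-01-01.
    RRULE = "DTSTART:20250101T060000Z RRULE:FREQ=DAILY;INTERVAL=1"

    # Optionally sanity-check the rule first (assumed preview endpoint,
    # matching the SchedulePreview schema's single required "rrule").
    requests.post(f"{BASE}/api/v2/schedules/preview/",
                  headers=HEADERS, json={"rrule": RRULE})

    # Create the schedule against unified job template 42; dtstart,
    # until, and timezone are computed server-side from the RRULE.
    requests.post(
        f"{BASE}/api/v2/schedules/",
        headers=HEADERS,
        json={"name": "nightly-sync", "rrule": RRULE,
              "unified_job_template": 42},
    )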
Use a value of 0 to indicate that no timeout should be imposed.", + "minimum": 0, + "title": "Per-Host Ansible Fact Cache Timeout", + "type": "integer" + }, + "API_400_ERROR_LOG_FORMAT": { + "default": "status {status_code} received by user {user_name} attempting to access {url_path} from {remote_addr}", + "description": "The format of logged messages when an API 4XX error occurs, the following variables will be substituted: \nstatus_code - The HTTP status code of the error\nuser_name - The user name attempting to use the API\nurl_path - The URL path to the API endpoint called\nremote_addr - The remote address seen for the user\nerror - The error set by the api endpoint\nVariables need to be in the format {}.", + "minLength": 1, + "title": "Log Format For API 4XX Errors", + "type": "string" + }, + "AUTHENTICATION_BACKENDS": { + "default": [ + "awx.sso.backends.TACACSPlusBackend", + "awx.main.backends.AWXModelBackend" + ], + "description": "List of authentication backends that are enabled based on license features and other authentication settings.", + "items": { + "minLength": 1, + "type": "string" + }, + "readOnly": true, + "type": "array" + }, + "AUTH_BASIC_ENABLED": { + "default": true, + "description": "Enable HTTP Basic Auth for the API Browser.", + "title": "Enable HTTP Basic Auth", + "type": "boolean" + }, + "AUTH_LDAP_1_BIND_DN": { + "default": "", + "description": "DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax.", + "title": "LDAP Bind DN", + "type": "string" + }, + "AUTH_LDAP_1_BIND_PASSWORD": { + "default": "", + "description": "Password used to bind LDAP user account.", + "title": "LDAP Bind Password", + "type": "string" + }, + "AUTH_LDAP_1_CONNECTION_OPTIONS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "OPT_NETWORK_TIMEOUT": 30, + "OPT_REFERRALS": 0 + }, + "description": "Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. \"OPT_REFERRALS\"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set.", + "title": "LDAP Connection Options", + "type": "object" + }, + "AUTH_LDAP_1_DENY_GROUP": { + "description": "Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported.", + "title": "LDAP Deny Group", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_1_GROUP_SEARCH": { + "default": [], + "description": "Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array" + }, + "AUTH_LDAP_1_GROUP_TYPE": { + "default": "MemberDNGroupType", + "description": "The group type may need to be changed based on the type of the LDAP server. 
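[Editor's note: the SettingSingleton entries above are flat key/value settings; in the conventional controller API they are read and updated through per-category settings endpoints rather than per-object resources. A hedged sketch; the "jobs" category slug is an assumption, and only the setting names appear in this excerpt:]

    import requests

    BASE = "https://controller.example.com"  # assumed
    HEADERS = {"Authorization": "Bearer TOKEN"}  # assumed

    # Tighten the Jinja policy and trim the ad hoc module allow-list.
    requests.patch(
        f"{BASE}/api/v2/settings/jobs/",  # assumed category slug
        headers=HEADERS,
        json={
            "ALLOW_JINJA_IN_EXTRA_VARS": "template",
            "AD_HOC_COMMANDS": ["command", "shell", "ping", "setup"],
        },
    )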
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups", + "enum": [ + "PosixGroupType", + "GroupOfNamesType", + "GroupOfUniqueNamesType", + "ActiveDirectoryGroupType", + "OrganizationalRoleGroupType", + "MemberDNGroupType", + "NestedGroupOfNamesType", + "NestedGroupOfUniqueNamesType", + "NestedActiveDirectoryGroupType", + "NestedOrganizationalRoleGroupType", + "NestedMemberDNGroupType", + "PosixUIDGroupType" + ], + "title": "LDAP Group Type", + "type": "string" + }, + "AUTH_LDAP_1_GROUP_TYPE_PARAMS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "member_attr": "member", + "name_attr": "cn" + }, + "description": "Key value parameters to send the chosen group type init method.", + "title": "LDAP Group Type Parameters", + "type": "object" + }, + "AUTH_LDAP_1_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "default": {}, + "description": "Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation.", + "title": "LDAP Organization Map", + "type": "object" + }, + "AUTH_LDAP_1_REQUIRE_GROUP": { + "description": "Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported.", + "title": "LDAP Require Group", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_1_SERVER_URI": { + "default": "", + "description": "URI to connect to LDAP server, such as \"ldap://ldap.example.com:389\" (non-SSL) or \"ldaps://ldap.example.com:636\" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty.", + "title": "LDAP Server URI", + "type": "string" + }, + "AUTH_LDAP_1_START_TLS": { + "default": false, + "description": "Whether to enable TLS when the LDAP connection is not using SSL.", + "title": "LDAP Start TLS", + "type": "boolean" + }, + "AUTH_LDAP_1_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "default": {}, + "description": "Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation.", + "title": "LDAP Team Map", + "type": "object" + }, + "AUTH_LDAP_1_USER_ATTR_MAP": { + "additionalProperties": { + "minLength": 1, + "type": "string" + }, + "default": {}, + "description": "Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details.", + "title": "LDAP User Attribute Map", + "type": "object" + }, + "AUTH_LDAP_1_USER_DN_TEMPLATE": { + "description": "Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. 
If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH.", + "title": "LDAP User DN Template", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_1_USER_FLAGS_BY_GROUP": { + "additionalProperties": { + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "default": {}, + "description": "Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail.", + "title": "LDAP User Flags By Group", + "type": "object" + }, + "AUTH_LDAP_1_USER_SEARCH": { + "default": [], + "description": "LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of \"LDAPUnion\" is possible. See the documentation for details.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array" + }, + "AUTH_LDAP_2_BIND_DN": { + "default": "", + "description": "DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax.", + "title": "LDAP Bind DN", + "type": "string" + }, + "AUTH_LDAP_2_BIND_PASSWORD": { + "default": "", + "description": "Password used to bind LDAP user account.", + "title": "LDAP Bind Password", + "type": "string" + }, + "AUTH_LDAP_2_CONNECTION_OPTIONS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "OPT_NETWORK_TIMEOUT": 30, + "OPT_REFERRALS": 0 + }, + "description": "Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. \"OPT_REFERRALS\"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set.", + "title": "LDAP Connection Options", + "type": "object" + }, + "AUTH_LDAP_2_DENY_GROUP": { + "description": "Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported.", + "title": "LDAP Deny Group", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_2_GROUP_SEARCH": { + "default": [], + "description": "Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array" + }, + "AUTH_LDAP_2_GROUP_TYPE": { + "default": "MemberDNGroupType", + "description": "The group type may need to be changed based on the type of the LDAP server. 
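[Editor's note: the AUTH_LDAP_1_* block above, repeated for each additional numbered LDAP server, describes one server configuration; per the descriptions, USER_DN_TEMPLATE takes precedence over USER_SEARCH when set. A sketch, assuming an "ldap" settings category slug and example DNs; the %(user)s placeholder follows the django-auth-ldap template convention referenced by these settings:]

    import requests

    BASE = "https://controller.example.com"  # assumed
    HEADERS = {"Authorization": "Bearer TOKEN"}  # assumed

    # Minimal config for LDAP server #1 using a DN template instead of
    # a user search.
    requests.patch(
        f"{BASE}/api/v2/settings/ldap/",  # assumed category slug
        headers=HEADERS,
        json={
            "AUTH_LDAP_1_SERVER_URI": "ldaps://ldap.example.com:636",
            "AUTH_LDAP_1_BIND_DN": "cn=svc-bind,dc=example,dc=com",
            "AUTH_LDAP_1_BIND_PASSWORD": "secret",
            "AUTH_LDAP_1_USER_DN_TEMPLATE": "uid=%(user)s,ou=people,dc=example,dc=com",
        },
    )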
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups", + "enum": [ + "PosixGroupType", + "GroupOfNamesType", + "GroupOfUniqueNamesType", + "ActiveDirectoryGroupType", + "OrganizationalRoleGroupType", + "MemberDNGroupType", + "NestedGroupOfNamesType", + "NestedGroupOfUniqueNamesType", + "NestedActiveDirectoryGroupType", + "NestedOrganizationalRoleGroupType", + "NestedMemberDNGroupType", + "PosixUIDGroupType" + ], + "title": "LDAP Group Type", + "type": "string" + }, + "AUTH_LDAP_2_GROUP_TYPE_PARAMS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "member_attr": "member", + "name_attr": "cn" + }, + "description": "Key value parameters to send the chosen group type init method.", + "title": "LDAP Group Type Parameters", + "type": "object" + }, + "AUTH_LDAP_2_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "default": {}, + "description": "Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation.", + "title": "LDAP Organization Map", + "type": "object" + }, + "AUTH_LDAP_2_REQUIRE_GROUP": { + "description": "Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported.", + "title": "LDAP Require Group", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_2_SERVER_URI": { + "default": "", + "description": "URI to connect to LDAP server, such as \"ldap://ldap.example.com:389\" (non-SSL) or \"ldaps://ldap.example.com:636\" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty.", + "title": "LDAP Server URI", + "type": "string" + }, + "AUTH_LDAP_2_START_TLS": { + "default": false, + "description": "Whether to enable TLS when the LDAP connection is not using SSL.", + "title": "LDAP Start TLS", + "type": "boolean" + }, + "AUTH_LDAP_2_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "default": {}, + "description": "Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation.", + "title": "LDAP Team Map", + "type": "object" + }, + "AUTH_LDAP_2_USER_ATTR_MAP": { + "additionalProperties": { + "minLength": 1, + "type": "string" + }, + "default": {}, + "description": "Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details.", + "title": "LDAP User Attribute Map", + "type": "object" + }, + "AUTH_LDAP_2_USER_DN_TEMPLATE": { + "description": "Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. 
If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH.", + "title": "LDAP User DN Template", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_2_USER_FLAGS_BY_GROUP": { + "additionalProperties": { + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "default": {}, + "description": "Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail.", + "title": "LDAP User Flags By Group", + "type": "object" + }, + "AUTH_LDAP_2_USER_SEARCH": { + "default": [], + "description": "LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of \"LDAPUnion\" is possible. See the documentation for details.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array" + }, + "AUTH_LDAP_3_BIND_DN": { + "default": "", + "description": "DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax.", + "title": "LDAP Bind DN", + "type": "string" + }, + "AUTH_LDAP_3_BIND_PASSWORD": { + "default": "", + "description": "Password used to bind LDAP user account.", + "title": "LDAP Bind Password", + "type": "string" + }, + "AUTH_LDAP_3_CONNECTION_OPTIONS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "OPT_NETWORK_TIMEOUT": 30, + "OPT_REFERRALS": 0 + }, + "description": "Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. \"OPT_REFERRALS\"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set.", + "title": "LDAP Connection Options", + "type": "object" + }, + "AUTH_LDAP_3_DENY_GROUP": { + "description": "Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported.", + "title": "LDAP Deny Group", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_3_GROUP_SEARCH": { + "default": [], + "description": "Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array" + }, + "AUTH_LDAP_3_GROUP_TYPE": { + "default": "MemberDNGroupType", + "description": "The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups", + "enum": [ + "PosixGroupType", + "GroupOfNamesType", + "GroupOfUniqueNamesType", + "ActiveDirectoryGroupType", + "OrganizationalRoleGroupType", + "MemberDNGroupType", + "NestedGroupOfNamesType", + "NestedGroupOfUniqueNamesType", + "NestedActiveDirectoryGroupType", + "NestedOrganizationalRoleGroupType", + "NestedMemberDNGroupType", + "PosixUIDGroupType" + ], + "title": "LDAP Group Type", + "type": "string" + }, + "AUTH_LDAP_3_GROUP_TYPE_PARAMS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "member_attr": "member", + "name_attr": "cn" + }, + "description": "Key value parameters to send the chosen group type init method.", + "title": "LDAP Group Type Parameters", + "type": "object" + }, + "AUTH_LDAP_3_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "default": {}, + "description": "Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation.", + "title": "LDAP Organization Map", + "type": "object" + }, + "AUTH_LDAP_3_REQUIRE_GROUP": { + "description": "Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported.", + "title": "LDAP Require Group", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_3_SERVER_URI": { + "default": "", + "description": "URI to connect to LDAP server, such as \"ldap://ldap.example.com:389\" (non-SSL) or \"ldaps://ldap.example.com:636\" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty.", + "title": "LDAP Server URI", + "type": "string" + }, + "AUTH_LDAP_3_START_TLS": { + "default": false, + "description": "Whether to enable TLS when the LDAP connection is not using SSL.", + "title": "LDAP Start TLS", + "type": "boolean" + }, + "AUTH_LDAP_3_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "default": {}, + "description": "Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation.", + "title": "LDAP Team Map", + "type": "object" + }, + "AUTH_LDAP_3_USER_ATTR_MAP": { + "additionalProperties": { + "minLength": 1, + "type": "string" + }, + "default": {}, + "description": "Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details.", + "title": "LDAP User Attribute Map", + "type": "object" + }, + "AUTH_LDAP_3_USER_DN_TEMPLATE": { + "description": "Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. 
If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH.", + "title": "LDAP User DN Template", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_3_USER_FLAGS_BY_GROUP": { + "additionalProperties": { + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "default": {}, + "description": "Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail.", + "title": "LDAP User Flags By Group", + "type": "object" + }, + "AUTH_LDAP_3_USER_SEARCH": { + "default": [], + "description": "LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of \"LDAPUnion\" is possible. See the documentation for details.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array" + }, + "AUTH_LDAP_4_BIND_DN": { + "default": "", + "description": "DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax.", + "title": "LDAP Bind DN", + "type": "string" + }, + "AUTH_LDAP_4_BIND_PASSWORD": { + "default": "", + "description": "Password used to bind LDAP user account.", + "title": "LDAP Bind Password", + "type": "string" + }, + "AUTH_LDAP_4_CONNECTION_OPTIONS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "OPT_NETWORK_TIMEOUT": 30, + "OPT_REFERRALS": 0 + }, + "description": "Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. \"OPT_REFERRALS\"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set.", + "title": "LDAP Connection Options", + "type": "object" + }, + "AUTH_LDAP_4_DENY_GROUP": { + "description": "Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported.", + "title": "LDAP Deny Group", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_4_GROUP_SEARCH": { + "default": [], + "description": "Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array" + }, + "AUTH_LDAP_4_GROUP_TYPE": { + "default": "MemberDNGroupType", + "description": "The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups", + "enum": [ + "PosixGroupType", + "GroupOfNamesType", + "GroupOfUniqueNamesType", + "ActiveDirectoryGroupType", + "OrganizationalRoleGroupType", + "MemberDNGroupType", + "NestedGroupOfNamesType", + "NestedGroupOfUniqueNamesType", + "NestedActiveDirectoryGroupType", + "NestedOrganizationalRoleGroupType", + "NestedMemberDNGroupType", + "PosixUIDGroupType" + ], + "title": "LDAP Group Type", + "type": "string" + }, + "AUTH_LDAP_4_GROUP_TYPE_PARAMS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "member_attr": "member", + "name_attr": "cn" + }, + "description": "Key value parameters to send the chosen group type init method.", + "title": "LDAP Group Type Parameters", + "type": "object" + }, + "AUTH_LDAP_4_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "default": {}, + "description": "Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation.", + "title": "LDAP Organization Map", + "type": "object" + }, + "AUTH_LDAP_4_REQUIRE_GROUP": { + "description": "Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported.", + "title": "LDAP Require Group", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_4_SERVER_URI": { + "default": "", + "description": "URI to connect to LDAP server, such as \"ldap://ldap.example.com:389\" (non-SSL) or \"ldaps://ldap.example.com:636\" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty.", + "title": "LDAP Server URI", + "type": "string" + }, + "AUTH_LDAP_4_START_TLS": { + "default": false, + "description": "Whether to enable TLS when the LDAP connection is not using SSL.", + "title": "LDAP Start TLS", + "type": "boolean" + }, + "AUTH_LDAP_4_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "default": {}, + "description": "Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation.", + "title": "LDAP Team Map", + "type": "object" + }, + "AUTH_LDAP_4_USER_ATTR_MAP": { + "additionalProperties": { + "minLength": 1, + "type": "string" + }, + "default": {}, + "description": "Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details.", + "title": "LDAP User Attribute Map", + "type": "object" + }, + "AUTH_LDAP_4_USER_DN_TEMPLATE": { + "description": "Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. 
If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH.", + "title": "LDAP User DN Template", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_4_USER_FLAGS_BY_GROUP": { + "additionalProperties": { + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "default": {}, + "description": "Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail.", + "title": "LDAP User Flags By Group", + "type": "object" + }, + "AUTH_LDAP_4_USER_SEARCH": { + "default": [], + "description": "LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of \"LDAPUnion\" is possible. See the documentation for details.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array" + }, + "AUTH_LDAP_5_BIND_DN": { + "default": "", + "description": "DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax.", + "title": "LDAP Bind DN", + "type": "string" + }, + "AUTH_LDAP_5_BIND_PASSWORD": { + "default": "", + "description": "Password used to bind LDAP user account.", + "title": "LDAP Bind Password", + "type": "string" + }, + "AUTH_LDAP_5_CONNECTION_OPTIONS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "OPT_NETWORK_TIMEOUT": 30, + "OPT_REFERRALS": 0 + }, + "description": "Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. \"OPT_REFERRALS\"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set.", + "title": "LDAP Connection Options", + "type": "object" + }, + "AUTH_LDAP_5_DENY_GROUP": { + "description": "Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported.", + "title": "LDAP Deny Group", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_5_GROUP_SEARCH": { + "default": [], + "description": "Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array" + }, + "AUTH_LDAP_5_GROUP_TYPE": { + "default": "MemberDNGroupType", + "description": "The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups", + "enum": [ + "PosixGroupType", + "GroupOfNamesType", + "GroupOfUniqueNamesType", + "ActiveDirectoryGroupType", + "OrganizationalRoleGroupType", + "MemberDNGroupType", + "NestedGroupOfNamesType", + "NestedGroupOfUniqueNamesType", + "NestedActiveDirectoryGroupType", + "NestedOrganizationalRoleGroupType", + "NestedMemberDNGroupType", + "PosixUIDGroupType" + ], + "title": "LDAP Group Type", + "type": "string" + }, + "AUTH_LDAP_5_GROUP_TYPE_PARAMS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "member_attr": "member", + "name_attr": "cn" + }, + "description": "Key value parameters to send the chosen group type init method.", + "title": "LDAP Group Type Parameters", + "type": "object" + }, + "AUTH_LDAP_5_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "default": {}, + "description": "Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation.", + "title": "LDAP Organization Map", + "type": "object" + }, + "AUTH_LDAP_5_REQUIRE_GROUP": { + "description": "Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported.", + "title": "LDAP Require Group", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_5_SERVER_URI": { + "default": "", + "description": "URI to connect to LDAP server, such as \"ldap://ldap.example.com:389\" (non-SSL) or \"ldaps://ldap.example.com:636\" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty.", + "title": "LDAP Server URI", + "type": "string" + }, + "AUTH_LDAP_5_START_TLS": { + "default": false, + "description": "Whether to enable TLS when the LDAP connection is not using SSL.", + "title": "LDAP Start TLS", + "type": "boolean" + }, + "AUTH_LDAP_5_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "default": {}, + "description": "Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation.", + "title": "LDAP Team Map", + "type": "object" + }, + "AUTH_LDAP_5_USER_ATTR_MAP": { + "additionalProperties": { + "minLength": 1, + "type": "string" + }, + "default": {}, + "description": "Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details.", + "title": "LDAP User Attribute Map", + "type": "object" + }, + "AUTH_LDAP_5_USER_DN_TEMPLATE": { + "description": "Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. 
If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH.", + "title": "LDAP User DN Template", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_5_USER_FLAGS_BY_GROUP": { + "additionalProperties": { + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "default": {}, + "description": "Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail.", + "title": "LDAP User Flags By Group", + "type": "object" + }, + "AUTH_LDAP_5_USER_SEARCH": { + "default": [], + "description": "LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of \"LDAPUnion\" is possible. See the documentation for details.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array" + }, + "AUTH_LDAP_BIND_DN": { + "default": "", + "description": "DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax.", + "title": "LDAP Bind DN", + "type": "string" + }, + "AUTH_LDAP_BIND_PASSWORD": { + "default": "", + "description": "Password used to bind LDAP user account.", + "title": "LDAP Bind Password", + "type": "string" + }, + "AUTH_LDAP_CONNECTION_OPTIONS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "OPT_NETWORK_TIMEOUT": 30, + "OPT_REFERRALS": 0 + }, + "description": "Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. \"OPT_REFERRALS\"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set.", + "title": "LDAP Connection Options", + "type": "object" + }, + "AUTH_LDAP_DENY_GROUP": { + "description": "Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported.", + "title": "LDAP Deny Group", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_GROUP_SEARCH": { + "default": [], + "description": "Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array" + }, + "AUTH_LDAP_GROUP_TYPE": { + "default": "MemberDNGroupType", + "description": "The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups", + "enum": [ + "PosixGroupType", + "GroupOfNamesType", + "GroupOfUniqueNamesType", + "ActiveDirectoryGroupType", + "OrganizationalRoleGroupType", + "MemberDNGroupType", + "NestedGroupOfNamesType", + "NestedGroupOfUniqueNamesType", + "NestedActiveDirectoryGroupType", + "NestedOrganizationalRoleGroupType", + "NestedMemberDNGroupType", + "PosixUIDGroupType" + ], + "title": "LDAP Group Type", + "type": "string" + }, + "AUTH_LDAP_GROUP_TYPE_PARAMS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "member_attr": "member", + "name_attr": "cn" + }, + "description": "Key value parameters to send the chosen group type init method.", + "title": "LDAP Group Type Parameters", + "type": "object" + }, + "AUTH_LDAP_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "default": {}, + "description": "Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation.", + "title": "LDAP Organization Map", + "type": "object" + }, + "AUTH_LDAP_REQUIRE_GROUP": { + "description": "Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported.", + "title": "LDAP Require Group", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_SERVER_URI": { + "default": "", + "description": "URI to connect to LDAP server, such as \"ldap://ldap.example.com:389\" (non-SSL) or \"ldaps://ldap.example.com:636\" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty.", + "title": "LDAP Server URI", + "type": "string" + }, + "AUTH_LDAP_START_TLS": { + "default": false, + "description": "Whether to enable TLS when the LDAP connection is not using SSL.", + "title": "LDAP Start TLS", + "type": "boolean" + }, + "AUTH_LDAP_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "default": {}, + "description": "Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation.", + "title": "LDAP Team Map", + "type": "object" + }, + "AUTH_LDAP_USER_ATTR_MAP": { + "additionalProperties": { + "minLength": 1, + "type": "string" + }, + "default": {}, + "description": "Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details.", + "title": "LDAP User Attribute Map", + "type": "object" + }, + "AUTH_LDAP_USER_DN_TEMPLATE": { + "description": "Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. 
If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH.", + "title": "LDAP User DN Template", + "type": "string", + "x-nullable": true + }, + "AUTH_LDAP_USER_FLAGS_BY_GROUP": { + "additionalProperties": { + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "default": {}, + "description": "Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail.", + "title": "LDAP User Flags By Group", + "type": "object" + }, + "AUTH_LDAP_USER_SEARCH": { + "default": [], + "description": "LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of \"LDAPUnion\" is possible. See the documentation for details.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array" + }, + "AUTOMATION_ANALYTICS_GATHER_INTERVAL": { + "default": 14400, + "description": "Interval (in seconds) between data gathering.", + "minimum": 1800, + "title": "Automation Analytics Gather Interval", + "type": "integer" + }, + "AUTOMATION_ANALYTICS_LAST_ENTRIES": { + "default": "", + "title": "Last gathered entries from the data collection service of Automation Analytics", + "type": "string" + }, + "AUTOMATION_ANALYTICS_LAST_GATHER": { + "format": "date-time", + "title": "Last gather date for Automation Analytics.", + "type": "string", + "x-nullable": true + }, + "AUTOMATION_ANALYTICS_URL": { + "default": "https://example.com", + "description": "This setting is used to configure the upload URL for data collection for Automation Analytics.", + "minLength": 1, + "title": "Automation Analytics upload URL", + "type": "string" + }, + "AWX_ANSIBLE_CALLBACK_PLUGINS": { + "default": [], + "description": "List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line.", + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "AWX_CLEANUP_PATHS": { + "default": true, + "description": "Enable or Disable TMP Dir cleanup", + "title": "Enable or Disable tmp dir cleanup", + "type": "boolean" + }, + "AWX_COLLECTIONS_ENABLED": { + "default": true, + "description": "Allows collections to be dynamically downloaded from a requirements.yml file for SCM projects.", + "title": "Enable Collection(s) Download", + "type": "boolean" + }, + "AWX_ISOLATION_BASE_PATH": { + "default": "/tmp", + "description": "The directory in which the service will create new temporary directories for job execution and isolation (such as credential files).", + "minLength": 1, + "title": "Job execution path", + "type": "string" + }, + "AWX_ISOLATION_SHOW_PATHS": { + "default": [], + "description": "List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. Volumes will be mounted from the execution node to the container. The supported format is HOST-DIR[:CONTAINER-DIR[:OPTIONS]]. ", + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "AWX_MOUNT_ISOLATED_PATHS_ON_K8S": { + "default": false, + "description": "Expose paths via hostPath for the Pods created by a Container Group. HostPath volumes present many security risks, and it is a best practice to avoid the use of HostPaths when possible. 
", + "title": "Expose host paths for Container Groups", + "type": "boolean" + }, + "AWX_REQUEST_PROFILE": { + "default": false, + "description": "Debug web request python timing", + "title": "Debug Web Requests", + "type": "boolean" + }, + "AWX_ROLES_ENABLED": { + "default": true, + "description": "Allows roles to be dynamically downloaded from a requirements.yml file for SCM projects.", + "title": "Enable Role Download", + "type": "boolean" + }, + "AWX_RUNNER_KEEPALIVE_SECONDS": { + "default": 0, + "description": "Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open.", + "title": "K8S Ansible Runner Keep-Alive Message Interval", + "type": "integer" + }, + "AWX_SHOW_PLAYBOOK_LINKS": { + "default": false, + "description": "Follow symbolic links when scanning for playbooks. Be aware that setting this to True can lead to infinite recursion if a link points to a parent directory of itself.", + "title": "Follow symlinks", + "type": "boolean" + }, + "AWX_TASK_ENV": { + "additionalProperties": { + "minLength": 1, + "type": "string" + }, + "default": {}, + "description": "Additional environment variables set for playbook runs, inventory updates, project updates, and notification sending.", + "title": "Extra Environment Variables", + "type": "object" + }, + "BULK_HOST_MAX_CREATE": { + "default": 100, + "description": "Max number of hosts to allow to be created in a single bulk action", + "title": "Max number of hosts to allow to be created in a single bulk action", + "type": "integer" + }, + "BULK_JOB_MAX_LAUNCH": { + "default": 100, + "description": "Max jobs to allow bulk jobs to launch", + "title": "Max jobs to allow bulk jobs to launch", + "type": "integer" + }, + "CLEANUP_HOST_METRICS_LAST_TS": { + "format": "date-time", + "title": "Last cleanup date for HostMetrics", + "type": "string", + "x-nullable": true + }, + "CSRF_TRUSTED_ORIGINS": { + "default": [], + "description": "If the service is behind a reverse proxy/load balancer, use this setting to configure the schema://addresses from which the service should trust Origin header values. ", + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "CUSTOM_LOGIN_INFO": { + "default": "", + "description": "If needed, you can add specific information (such as a legal notice or a disclaimer) to a text box in the login modal using this setting. Any content added must be in plain text or an HTML fragment, as other markup languages are not supported.", + "title": "Custom Login Info", + "type": "string" + }, + "CUSTOM_LOGO": { + "default": "", + "description": "To set up a custom logo, provide a file that you create. For the custom logo to look its best, use a .png file with a transparent background. GIF, PNG and JPEG formats are supported.", + "title": "Custom Logo", + "type": "string" + }, + "CUSTOM_VENV_PATHS": { + "default": [], + "description": "Paths where Tower will look for custom virtual environments (in addition to /var/lib/awx/venv/). 
Enter one path per line.", + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "DEFAULT_CONTAINER_RUN_OPTIONS": { + "default": [ + "--network", + "slirp4netns:enable_ipv6=true" + ], + "description": "List of options to pass to podman run example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug']", + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "DEFAULT_CONTROL_PLANE_QUEUE_NAME": { + "default": "controlplane", + "minLength": 1, + "readOnly": true, + "title": "The instance group where control plane tasks run", + "type": "string" + }, + "DEFAULT_EXECUTION_ENVIRONMENT": { + "description": "The Execution Environment to be used when one has not been configured for a job template.", + "title": "Global default execution environment", + "type": "integer", + "x-nullable": true + }, + "DEFAULT_EXECUTION_QUEUE_NAME": { + "default": "default", + "minLength": 1, + "readOnly": true, + "title": "The instance group where user jobs run (currently only on non-VM installs)", + "type": "string" + }, + "DEFAULT_INVENTORY_UPDATE_TIMEOUT": { + "default": 0, + "description": "Maximum time in seconds to allow inventory updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual inventory source will override this.", + "minimum": 0, + "title": "Default Inventory Update Timeout", + "type": "integer" + }, + "DEFAULT_JOB_IDLE_TIMEOUT": { + "default": 0, + "description": "If no output is detected from ansible in this number of seconds the execution will be terminated. Use value of 0 to indicate that no idle timeout should be imposed.", + "minimum": 0, + "title": "Default Job Idle Timeout", + "type": "integer" + }, + "DEFAULT_JOB_TIMEOUT": { + "default": 0, + "description": "Maximum time in seconds to allow jobs to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual job template will override this.", + "minimum": 0, + "title": "Default Job Timeout", + "type": "integer" + }, + "DEFAULT_PROJECT_UPDATE_TIMEOUT": { + "default": 0, + "description": "Maximum time in seconds to allow project updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual project will override this.", + "minimum": 0, + "title": "Default Project Update Timeout", + "type": "integer" + }, + "DISABLE_LOCAL_AUTH": { + "default": false, + "description": "Controls whether users are prevented from using the built-in authentication system. You probably want to do this if you are using an LDAP or SAML integration.", + "title": "Disable the built-in authentication system", + "type": "boolean" + }, + "EVENT_STDOUT_MAX_BYTES_DISPLAY": { + "default": 1024, + "description": "Maximum Size of Standard Output in bytes to display for a single job or ad hoc command event. 
`stdout` will end with `\u2026` when truncated.", + "minimum": 0, + "title": "Job Event Standard Output Maximum Display Size", + "type": "integer" + }, + "GALAXY_IGNORE_CERTS": { + "default": false, + "description": "If set to true, certificate validation will not be done when installing content from any Galaxy server.", + "title": "Ignore Ansible Galaxy SSL Certificate Verification", + "type": "boolean" + }, + "GALAXY_TASK_ENV": { + "additionalProperties": { + "minLength": 1, + "type": "string" + }, + "default": { + "ANSIBLE_FORCE_COLOR": "false", + "GIT_SSH_COMMAND": "ssh -o StrictHostKeyChecking=no" + }, + "description": "Additional environment variables set for invocations of ansible-galaxy within project updates. Useful if you must use a proxy server for ansible-galaxy but not git.", + "title": "Environment Variables for Galaxy Commands", + "type": "object" + }, + "HOST_METRIC_SUMMARY_TASK_LAST_TS": { + "format": "date-time", + "title": "Last computing date of HostMetricSummaryMonthly", + "type": "string", + "x-nullable": true + }, + "INSIGHTS_TRACKING_STATE": { + "default": false, + "description": "Enables the service to gather data on automation and send it to Automation Analytics.", + "title": "Gather data for Automation Analytics", + "type": "boolean" + }, + "INSTALL_UUID": { + "default": "00000000-0000-0000-0000-000000000000", + "minLength": 1, + "readOnly": true, + "title": "Unique identifier for an installation", + "type": "string" + }, + "IS_K8S": { + "default": false, + "description": "Indicates whether the instance is part of a kubernetes-based deployment.", + "readOnly": true, + "title": "Is k8s", + "type": "boolean" + }, + "LICENSE": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": {}, + "description": "The license controls which features and functionality are enabled. Use /api/v2/config/ to update or change the license.", + "readOnly": true, + "title": "License", + "type": "object" + }, + "LOCAL_PASSWORD_MIN_DIGITS": { + "default": 0, + "description": "Minimum number of digit characters required in a local password. 0 means no minimum", + "minimum": 0, + "title": "Minimum number of digit characters in local password", + "type": "integer" + }, + "LOCAL_PASSWORD_MIN_LENGTH": { + "default": 0, + "description": "Minimum number of characters required in a local password. 0 means no minimum", + "minimum": 0, + "title": "Minimum number of characters in local password", + "type": "integer" + }, + "LOCAL_PASSWORD_MIN_SPECIAL": { + "default": 0, + "description": "Minimum number of special characters required in a local password. 0 means no minimum", + "minimum": 0, + "title": "Minimum number of special characters in local password", + "type": "integer" + }, + "LOCAL_PASSWORD_MIN_UPPER": { + "default": 0, + "description": "Minimum number of uppercase characters required in a local password. 0 means no minimum", + "minimum": 0, + "title": "Minimum number of uppercase characters in local password", + "type": "integer" + }, + "LOGIN_REDIRECT_OVERRIDE": { + "default": "", + "description": "URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page.", + "title": "Login redirect override URL", + "type": "string" + }, + "LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB": { + "default": 1, + "description": "Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). 
It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.", + "minimum": 1, + "title": "Maximum disk persistence for rsyslogd action queuing (in GB)", + "type": "integer" + }, + "LOG_AGGREGATOR_ACTION_QUEUE_SIZE": { + "default": 131072, + "description": "Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5).", + "minimum": 1, + "title": "Maximum number of messages that can be stored in the log action queue", + "type": "integer" + }, + "LOG_AGGREGATOR_ENABLED": { + "default": false, + "description": "Enable sending logs to external log aggregator.", + "title": "Enable External Logging", + "type": "boolean" + }, + "LOG_AGGREGATOR_HOST": { + "description": "Hostname/IP where external logs will be sent to.", + "minLength": 1, + "title": "Logging Aggregator", + "type": "string", + "x-nullable": true + }, + "LOG_AGGREGATOR_INDIVIDUAL_FACTS": { + "default": false, + "description": "If set, system tracking facts will be sent for each package, service, or other item found in a scan, allowing for greater search query granularity. If unset, facts will be sent as a single dictionary, allowing for greater efficiency in fact processing.", + "title": "Log System Tracking Facts Individually", + "type": "boolean" + }, + "LOG_AGGREGATOR_LEVEL": { + "default": "INFO", + "description": "Level threshold used by log handler. Severities from lowest to highest are DEBUG, INFO, WARNING, ERROR, CRITICAL. Messages less severe than the threshold will be ignored by log handler. (messages under category awx.analytics ignore this setting)", + "enum": [ + "DEBUG", + "INFO", + "WARNING", + "ERROR", + "CRITICAL" + ], + "title": "Logging Aggregator Level Threshold", + "type": "string" + }, + "LOG_AGGREGATOR_LOGGERS": { + "default": [ + "awx", + "activity_stream", + "job_events", + "system_tracking", + "broadcast_websocket" + ], + "description": "List of loggers that will send HTTP logs to the collector, these can include any or all of: \nawx - service logs\nactivity_stream - activity stream records\njob_events - callback data from Ansible job events\nsystem_tracking - facts gathered from scan jobs\nbroadcast_websocket - errors pertaining to websockets broadcast metrics\n", + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "LOG_AGGREGATOR_MAX_DISK_USAGE_PATH": { + "default": "/var/lib/awx", + "description": "Location to persist logs that should be retried after an outage of the external log aggregator (defaults to /var/lib/awx). Equivalent to the rsyslogd queue.spoolDirectory setting.", + "minLength": 1, + "title": "File system location for rsyslogd disk persistence", + "type": "string" + }, + "LOG_AGGREGATOR_PASSWORD": { + "default": "", + "description": "Password or authentication token for external log aggregator (if required; HTTP/s only).", + "title": "Logging Aggregator Password/Token", + "type": "string" + }, + "LOG_AGGREGATOR_PORT": { + "description": "Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator).", + "title": "Logging Aggregator Port", + "type": "integer", + "x-nullable": true + }, + "LOG_AGGREGATOR_PROTOCOL": { + "default": "https", + "description": "Protocol used to communicate with log aggregator. 
HTTPS/HTTP assumes HTTPS unless http:// is explicitly used in the Logging Aggregator hostname.", + "enum": [ + "https", + "tcp", + "udp" + ], + "title": "Logging Aggregator Protocol", + "type": "string" + }, + "LOG_AGGREGATOR_RSYSLOGD_DEBUG": { + "default": false, + "description": "Enables high verbosity debugging for rsyslogd. Useful for debugging connection issues for external log aggregation.", + "title": "Enable rsyslogd debugging", + "type": "boolean" + }, + "LOG_AGGREGATOR_TCP_TIMEOUT": { + "default": 5, + "description": "Number of seconds for a TCP connection to the external log aggregator to time out. Applies to HTTPS and TCP log aggregator protocols.", + "title": "TCP Connection Timeout", + "type": "integer" + }, + "LOG_AGGREGATOR_TOWER_UUID": { + "default": "", + "description": "Useful to uniquely identify instances.", + "title": "Cluster-wide unique identifier.", + "type": "string" + }, + "LOG_AGGREGATOR_TYPE": { + "description": "Format messages for the chosen log aggregator.", + "enum": [ + "logstash", + "splunk", + "loggly", + "sumologic", + "other" + ], + "title": "Logging Aggregator Type", + "type": "string", + "x-nullable": true + }, + "LOG_AGGREGATOR_USERNAME": { + "default": "", + "description": "Username for external log aggregator (if required; HTTP/s only).", + "title": "Logging Aggregator Username", + "type": "string" + }, + "LOG_AGGREGATOR_VERIFY_CERT": { + "default": true, + "description": "Flag to control enable/disable of certificate verification when LOG_AGGREGATOR_PROTOCOL is \"https\". If enabled, the log handler will verify the certificate sent by the external log aggregator before establishing the connection.", + "title": "Enable/disable HTTPS certificate verification", + "type": "boolean" + }, + "MANAGE_ORGANIZATION_AUTH": { + "default": true, + "description": "Controls whether any Organization Admin has the privileges to create and manage users and teams. You may want to disable this ability if you are using an LDAP or SAML integration.", + "title": "Organization Admins Can Manage Users and Teams", + "type": "boolean" + }, + "MAX_FORKS": { + "default": 200, + "description": "Saving a Job Template with more than this number of forks will result in an error. When set to 0, no limit is applied.", + "title": "Maximum number of forks per job", + "type": "integer" + }, + "MAX_UI_JOB_EVENTS": { + "default": 4000, + "description": "Maximum number of job events for the UI to retrieve within a single request.", + "minimum": 100, + "title": "Max Job Events Retrieved by UI", + "type": "integer" + }, + "MAX_WEBSOCKET_EVENT_RATE": { + "default": 30, + "description": "Maximum number of messages to update the UI live job output with per second. 
Value of 0 means no limit.", + "minimum": 0, + "title": "Job Event Maximum Websocket Messages Per Second", + "type": "integer" + }, + "OAUTH2_PROVIDER": { + "additionalProperties": { + "minimum": 1, + "type": "integer" + }, + "default": { + "ACCESS_TOKEN_EXPIRE_SECONDS": 31536000000, + "AUTHORIZATION_CODE_EXPIRE_SECONDS": 600, + "REFRESH_TOKEN_EXPIRE_SECONDS": 2628000 + }, + "description": "Dictionary for customizing OAuth 2 timeouts, available items are `ACCESS_TOKEN_EXPIRE_SECONDS`, the duration of access tokens in the number of seconds, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, the duration of authorization codes in the number of seconds, and `REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after expired access tokens, in the number of seconds.", + "title": "OAuth 2 Timeout Settings", + "type": "object" + }, + "ORG_ADMINS_CAN_SEE_ALL_USERS": { + "default": true, + "description": "Controls whether any Organization Admin can view all users and teams, even those not associated with their Organization.", + "title": "All Users Visible to Organization Admins", + "type": "boolean" + }, + "PENDO_TRACKING_STATE": { + "default": "off", + "description": "Enable or Disable User Analytics Tracking.", + "enum": [ + "off", + "anonymous", + "detailed" + ], + "readOnly": true, + "title": "User Analytics Tracking State", + "type": "string" + }, + "PROJECT_UPDATE_VVV": { + "default": false, + "description": "Adds the CLI -vvv flag to ansible-playbook runs of project_update.yml used for project updates.", + "title": "Run Project Updates With Higher Verbosity", + "type": "boolean" + }, + "PROXY_IP_ALLOWED_LIST": { + "default": [], + "description": "If the service is behind a reverse proxy/load balancer, use this setting to configure the proxy IP addresses from which the service should trust custom REMOTE_HOST_HEADERS header values. If this setting is an empty list (the default), the headers specified by REMOTE_HOST_HEADERS will be trusted unconditionally.", + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "RADIUS_PORT": { + "default": 1812, + "description": "Port of RADIUS server.", + "maximum": 65535, + "minimum": 1, + "title": "RADIUS Port", + "type": "integer" + }, + "RADIUS_SECRET": { + "default": "", + "description": "Shared secret for authenticating to RADIUS server.", + "title": "RADIUS Secret", + "type": "string" + }, + "RADIUS_SERVER": { + "default": "", + "description": "Hostname/IP of RADIUS server. RADIUS authentication is disabled if this setting is empty.", + "title": "RADIUS Server", + "type": "string" + }, + "RECEPTOR_RELEASE_WORK": { + "default": true, + "description": "Release receptor work", + "title": "Release Receptor Work", + "type": "boolean" + }, + "REDHAT_PASSWORD": { + "default": "", + "description": "This password is used to send data to Automation Analytics", + "title": "Red Hat customer password", + "type": "string" + }, + "REDHAT_USERNAME": { + "default": "", + "description": "This username is used to send data to Automation Analytics", + "title": "Red Hat customer username", + "type": "string" + }, + "REMOTE_HOST_HEADERS": { + "default": [ + "REMOTE_ADDR", + "REMOTE_HOST" + ], + "description": "HTTP headers and meta keys to search to determine remote host name or IP. Add additional items to this list, such as \"HTTP_X_FORWARDED_FOR\", if behind a reverse proxy. 
See the \"Proxy Support\" section of the AAP Installation guide for more details.", + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "SAML_AUTO_CREATE_OBJECTS": { + "default": true, + "description": "When enabled (the default), mapped Organizations and Teams will be created automatically on successful SAML login.", + "title": "Automatically Create Organizations and Teams on SAML Login", + "type": "boolean" + }, + "SCHEDULE_MAX_JOBS": { + "default": 10, + "description": "Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created.", + "minimum": 1, + "title": "Maximum Scheduled Jobs", + "type": "integer" + }, + "SESSIONS_PER_USER": { + "default": -1, + "description": "Maximum number of simultaneous logged in sessions a user may have. To disable enter -1.", + "minimum": -1, + "title": "Maximum number of simultaneous logged in sessions", + "type": "integer" + }, + "SESSION_COOKIE_AGE": { + "default": 1800, + "description": "Number of seconds that a user is inactive before they will need to login again.", + "maximum": 30000000000, + "minimum": 60, + "title": "Idle Time Force Log Out", + "type": "integer" + }, + "SOCIAL_AUTH_AZUREAD_OAUTH2_CALLBACK_URL": { + "default": "https://towerhost/sso/complete/azuread-oauth2/", + "description": "Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. ", + "minLength": 1, + "readOnly": true, + "title": "Azure AD OAuth2 Callback URL", + "type": "string" + }, + "SOCIAL_AUTH_AZUREAD_OAUTH2_KEY": { + "default": "", + "description": "The OAuth2 key (Client ID) from your Azure AD application.", + "title": "Azure AD OAuth2 Key", + "type": "string" + }, + "SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation.", + "title": "Azure AD OAuth2 Organization Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET": { + "default": "", + "description": "The OAuth2 secret (Client Secret) from your Azure AD application.", + "title": "Azure AD OAuth2 Secret", + "type": "string" + }, + "SOCIAL_AUTH_AZUREAD_OAUTH2_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation.", + "title": "Azure AD OAuth2 Team Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GITHUB_CALLBACK_URL": { + "default": "https://towerhost/sso/complete/github/", + "description": "Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail.", + "minLength": 1, + "readOnly": true, + "title": "GitHub OAuth2 Callback URL", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL": { + "default": "", + "description": "The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. 
Refer to Github Enterprise documentation for more details.", + "title": "GitHub Enterprise API URL", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL": { + "default": "https://towerhost/sso/complete/github-enterprise/", + "description": "Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail.", + "minLength": 1, + "readOnly": true, + "title": "GitHub Enterprise OAuth2 Callback URL", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY": { + "default": "", + "description": "The OAuth2 key (Client ID) from your GitHub Enterprise developer application.", + "title": "GitHub Enterprise OAuth2 Key", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation.", + "title": "GitHub Enterprise OAuth2 Organization Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL": { + "default": "", + "description": "The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details.", + "title": "GitHub Enterprise Organization API URL", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_CALLBACK_URL": { + "default": "https://towerhost/sso/complete/github-enterprise-org/", + "description": "Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail.", + "minLength": 1, + "readOnly": true, + "title": "GitHub Enterprise Organization OAuth2 Callback URL", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY": { + "default": "", + "description": "The OAuth2 key (Client ID) from your GitHub Enterprise organization application.", + "title": "GitHub Enterprise Organization OAuth2 Key", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME": { + "default": "", + "description": "The name of your GitHub Enterprise organization, as used in your organization's URL: https://github.com//.", + "title": "GitHub Enterprise Organization Name", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation.", + "title": "GitHub Enterprise Organization OAuth2 Organization Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET": { + "default": "", + "description": "The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application.", + "title": "GitHub Enterprise Organization OAuth2 Secret", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping of team members (users) from social auth accounts. 
Configuration\ndetails are available in the documentation.", + "title": "GitHub Enterprise Organization OAuth2 Team Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL": { + "default": "", + "description": "The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details.", + "title": "GitHub Enterprise Organization URL", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET": { + "default": "", + "description": "The OAuth2 secret (Client Secret) from your GitHub Enterprise developer application.", + "title": "GitHub Enterprise OAuth2 Secret", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL": { + "default": "", + "description": "The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details.", + "title": "GitHub Enterprise Team API URL", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_CALLBACK_URL": { + "default": "https://towerhost/sso/complete/github-enterprise-team/", + "description": "Create an organization-owned application at https://github.com/organizations//settings/applications and obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this URL as the callback URL for your application.", + "minLength": 1, + "readOnly": true, + "title": "GitHub Enterprise Team OAuth2 Callback URL", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID": { + "default": "", + "description": "Find the numeric team ID using the Github Enterprise API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/.", + "title": "GitHub Enterprise Team ID", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY": { + "default": "", + "description": "The OAuth2 key (Client ID) from your GitHub Enterprise organization application.", + "title": "GitHub Enterprise Team OAuth2 Key", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation.", + "title": "GitHub Enterprise OAuth2 Team Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation.", + "title": "GitHub Enterprise Team OAuth2 Organization Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SECRET": { + "default": "", + "description": "The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application.", + "title": "GitHub Enterprise Team OAuth2 Secret", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping of team members (users) from social auth accounts. 
Configuration\ndetails are available in the documentation.", + "title": "GitHub Enterprise Team OAuth2 Team Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL": { + "default": "", + "description": "The URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/. Refer to GitHub Enterprise documentation for more details.", + "title": "GitHub Enterprise Team URL", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_URL": { + "default": "", + "description": "The URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/. Refer to GitHub Enterprise documentation for more details.", + "title": "GitHub Enterprise URL", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_KEY": { + "default": "", + "description": "The OAuth2 key (Client ID) from your GitHub developer application.", + "title": "GitHub OAuth2 Key", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation.", + "title": "GitHub OAuth2 Organization Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GITHUB_ORG_CALLBACK_URL": { + "default": "https://towerhost/sso/complete/github-org/", + "description": "Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail.", + "minLength": 1, + "readOnly": true, + "title": "GitHub Organization OAuth2 Callback URL", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ORG_KEY": { + "default": "", + "description": "The OAuth2 key (Client ID) from your GitHub organization application.", + "title": "GitHub Organization OAuth2 Key", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ORG_NAME": { + "default": "", + "description": "The name of your GitHub organization, as used in your organization's URL: https://github.com//.", + "title": "GitHub Organization Name", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation.", + "title": "GitHub Organization OAuth2 Organization Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GITHUB_ORG_SECRET": { + "default": "", + "description": "The OAuth2 secret (Client Secret) from your GitHub organization application.", + "title": "GitHub Organization OAuth2 Secret", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping of team members (users) from social auth accounts.
Configuration\ndetails are available in the documentation.", + "title": "GitHub Organization OAuth2 Team Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GITHUB_SECRET": { + "default": "", + "description": "The OAuth2 secret (Client Secret) from your GitHub developer application.", + "title": "GitHub OAuth2 Secret", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_TEAM_CALLBACK_URL": { + "default": "https://towerhost/sso/complete/github-team/", + "description": "Create an organization-owned application at https://github.com/organizations//settings/applications and obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this URL as the callback URL for your application.", + "minLength": 1, + "readOnly": true, + "title": "GitHub Team OAuth2 Callback URL", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_TEAM_ID": { + "default": "", + "description": "Find the numeric team ID using the GitHub API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/.", + "title": "GitHub Team ID", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_TEAM_KEY": { + "default": "", + "description": "The OAuth2 key (Client ID) from your GitHub organization application.", + "title": "GitHub Team OAuth2 Key", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation.", + "title": "GitHub OAuth2 Team Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation.", + "title": "GitHub Team OAuth2 Organization Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GITHUB_TEAM_SECRET": { + "default": "", + "description": "The OAuth2 secret (Client Secret) from your GitHub organization application.", + "title": "GitHub Team OAuth2 Secret", + "type": "string" + }, + "SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation.", + "title": "GitHub Team OAuth2 Team Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": {}, + "description": "Extra arguments for Google OAuth2 login. You can restrict it to only allow a single domain to authenticate, even if the user is logged in with multiple Google accounts. Refer to the documentation for more detail.", + "title": "Google OAuth2 Extra Arguments", + "type": "object" + }, + "SOCIAL_AUTH_GOOGLE_OAUTH2_CALLBACK_URL": { + "default": "https://towerhost/sso/complete/google-oauth2/", + "description": "Provide this URL as the callback URL for your application as part of your registration process.
Refer to the documentation for more detail.", + "minLength": 1, + "readOnly": true, + "title": "Google OAuth2 Callback URL", + "type": "string" + }, + "SOCIAL_AUTH_GOOGLE_OAUTH2_KEY": { + "default": "", + "description": "The OAuth2 key from your web application.", + "title": "Google OAuth2 Key", + "type": "string" + }, + "SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation.", + "title": "Google OAuth2 Organization Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET": { + "default": "", + "description": "The OAuth2 secret from your web application.", + "title": "Google OAuth2 Secret", + "type": "string" + }, + "SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation.", + "title": "Google OAuth2 Team Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS": { + "default": [], + "description": "Update this setting to restrict the domains who are allowed to login using Google OAuth2.", + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array" + }, + "SOCIAL_AUTH_OIDC_KEY": { + "description": "The OIDC key (Client ID) from your IDP.", + "minLength": 1, + "title": "OIDC Key", + "type": "string" + }, + "SOCIAL_AUTH_OIDC_OIDC_ENDPOINT": { + "default": "", + "description": "The URL for your OIDC provider including the path up to /.well-known/openid-configuration", + "title": "OIDC Provider URL", + "type": "string" + }, + "SOCIAL_AUTH_OIDC_SECRET": { + "default": "", + "description": "The OIDC secret (Client Secret) from your IDP.", + "title": "OIDC Secret", + "type": "string" + }, + "SOCIAL_AUTH_OIDC_VERIFY_SSL": { + "default": true, + "description": "Verify the OIDC provider ssl certificate.", + "title": "Verify OIDC Provider Certificate", + "type": "boolean" + }, + "SOCIAL_AUTH_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation.", + "title": "Social Auth Organization Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_SAML_CALLBACK_URL": { + "default": "https://towerhost/sso/complete/saml/", + "description": "Register the service as a service provider (SP) with each identity provider (IdP) you have configured. Provide your SP Entity ID and this ACS URL for your application.", + "minLength": 1, + "readOnly": true, + "title": "SAML Assertion Consumer Service (ACS) URL", + "type": "string" + }, + "SOCIAL_AUTH_SAML_ENABLED_IDPS": { + "additionalProperties": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "type": "object" + }, + "default": {}, + "description": "Configure the Entity ID, SSO URL and certificate for each identity provider (IdP) in use. 
Multiple SAML IdPs are supported. Some IdPs may provide user data using attribute names that differ from the default OIDs. Attribute names may be overridden for each IdP. Refer to the Ansible documentation for additional details and syntax.", + "title": "SAML Enabled Identity Providers", + "type": "object" + }, + "SOCIAL_AUTH_SAML_EXTRA_DATA": { + "description": "A list of tuples that maps IDP attributes to extra_attributes. Each attribute will be a list of values, even if there is only one value.", + "items": { + "type": "string", + "x-nullable": true + }, + "type": "array", + "x-nullable": true + }, + "SOCIAL_AUTH_SAML_METADATA_URL": { + "default": "https://towerhost/sso/metadata/saml/", + "description": "If your identity provider (IdP) allows uploading an XML metadata file, you can download one from this URL.", + "minLength": 1, + "readOnly": true, + "title": "SAML Service Provider Metadata URL", + "type": "string" + }, + "SOCIAL_AUTH_SAML_ORGANIZATION_ATTR": { + "additionalProperties": { + "type": "string" + }, + "default": {}, + "description": "Used to translate user organization membership.", + "title": "SAML Organization Attribute Mapping", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_SAML_ORGANIZATION_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation.", + "title": "SAML Organization Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_SAML_ORG_INFO": { + "additionalProperties": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "type": "object" + }, + "default": {}, + "description": "Provide the URL, display name, and the name of your app. Refer to the documentation for example syntax.", + "title": "SAML Service Provider Organization Info", + "type": "object" + }, + "SOCIAL_AUTH_SAML_SECURITY_CONFIG": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": { + "requestedAuthnContext": false + }, + "description": "A dict of key-value pairs that are passed to the underlying python-saml security setting: https://github.com/onelogin/python-saml#settings", + "title": "SAML Security Config", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_SAML_SP_ENTITY_ID": { + "default": "", + "description": "The application-defined unique identifier used as the audience of the SAML service provider (SP) configuration.
This is usually the URL for the service.", + "title": "SAML Service Provider Entity ID", + "type": "string" + }, + "SOCIAL_AUTH_SAML_SP_EXTRA": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "description": "A dict of key value pairs to be passed to the underlying python-saml Service Provider configuration setting.", + "title": "SAML Service Provider extra configuration data", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_SAML_SP_PRIVATE_KEY": { + "default": "", + "description": "Create a keypair to use as a service provider (SP) and include the private key content here.", + "title": "SAML Service Provider Private Key", + "type": "string" + }, + "SOCIAL_AUTH_SAML_SP_PUBLIC_CERT": { + "default": "", + "description": "Create a keypair to use as a service provider (SP) and include the certificate content here.", + "title": "SAML Service Provider Public Certificate", + "type": "string" + }, + "SOCIAL_AUTH_SAML_SUPPORT_CONTACT": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": {}, + "description": "Provide the name and email address of the support contact for your service provider. Refer to the documentation for example syntax.", + "title": "SAML Service Provider Support Contact", + "type": "object" + }, + "SOCIAL_AUTH_SAML_TEAM_ATTR": { + "additionalProperties": { + "type": "string" + }, + "default": {}, + "description": "Used to translate user team membership.", + "title": "SAML Team Attribute Mapping", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_SAML_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation.", + "title": "SAML Team Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_SAML_TECHNICAL_CONTACT": { + "additionalProperties": { + "type": "string", + "x-nullable": true + }, + "default": {}, + "description": "Provide the name and email address of the technical contact for your service provider. Refer to the documentation for example syntax.", + "title": "SAML Service Provider Technical Contact", + "type": "object" + }, + "SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR": { + "additionalProperties": { + "type": "string" + }, + "default": {}, + "description": "Used to map super users and system auditors from SAML.", + "title": "SAML User Flags Attribute Mapping", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_TEAM_MAP": { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "description": "Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation.", + "title": "Social Auth Team Map", + "type": "object", + "x-nullable": true + }, + "SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL": { + "default": false, + "description": "Enabling this setting will tell social auth to use the full Email as username instead of the full name", + "title": "Use Email address for usernames", + "type": "boolean" + }, + "SOCIAL_AUTH_USER_FIELDS": { + "description": "When set to an empty list `[]`, this setting prevents new user accounts from being created. 
Only users who have previously logged in using social auth or have a user account with a matching email address will be able to log in.", + "items": { + "minLength": 1, + "type": "string" + }, + "type": "array", + "x-nullable": true + }, + "STDOUT_MAX_BYTES_DISPLAY": { + "default": 1048576, + "description": "Maximum size of standard output in bytes to display before requiring the output to be downloaded.", + "minimum": 0, + "title": "Standard Output Maximum Display Size", + "type": "integer" + }, + "SUBSCRIPTIONS_PASSWORD": { + "default": "", + "description": "This password is used to retrieve subscription and content information.", + "title": "Red Hat or Satellite password", + "type": "string" + }, + "SUBSCRIPTIONS_USERNAME": { + "default": "", + "description": "This username is used to retrieve subscription and content information.", + "title": "Red Hat or Satellite username", + "type": "string" + }, + "SUBSCRIPTION_USAGE_MODEL": { + "default": "", + "enum": [ + "", + "unique_managed_hosts" + ], + "title": "Defines subscription usage model and shows Host Metrics", + "type": "string" + }, + "TACACSPLUS_AUTH_PROTOCOL": { + "default": "ascii", + "description": "Choose the authentication protocol used by the TACACS+ client.", + "enum": [ + "ascii", + "pap" + ], + "title": "TACACS+ Authentication Protocol", + "type": "string" + }, + "TACACSPLUS_HOST": { + "default": "", + "description": "Hostname of the TACACS+ server.", + "title": "TACACS+ Server", + "type": "string" + }, + "TACACSPLUS_PORT": { + "default": 49, + "description": "Port number of the TACACS+ server.", + "maximum": 65535, + "minimum": 1, + "title": "TACACS+ Port", + "type": "integer" + }, + "TACACSPLUS_REM_ADDR": { + "default": false, + "description": "Enable sending of the client address by the TACACS+ client.", + "title": "TACACS+ client address sending enabled", + "type": "boolean" + }, + "TACACSPLUS_SECRET": { + "default": "", + "description": "Shared secret for authenticating to the TACACS+ server.", + "title": "TACACS+ Secret", + "type": "string" + }, + "TACACSPLUS_SESSION_TIMEOUT": { + "default": 5, + "description": "TACACS+ session timeout value in seconds; 0 disables the timeout.", + "minimum": 0, + "title": "TACACS+ Auth Session Timeout", + "type": "integer" + }, + "TOWER_URL_BASE": { + "default": "https://towerhost", + "description": "This setting is used by services like notifications to render a valid URL to the service.", + "minLength": 1, + "title": "Base URL of the service", + "type": "string" + }, + "UI_LIVE_UPDATES_ENABLED": { + "default": true, + "description": "If disabled, the page will not refresh when events are received.
Reloading the page will be required to get the latest details.", + "title": "Enable Live Updates in the UI", + "type": "boolean" + }, + "UI_NEXT": { + "default": true, + "description": "Enable preview of new user interface.", + "title": "Enable Preview of New User Interface", + "type": "boolean" + } + }, + "required": [ + "ACTIVITY_STREAM_ENABLED", + "ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC", + "ORG_ADMINS_CAN_SEE_ALL_USERS", + "MANAGE_ORGANIZATION_AUTH", + "TOWER_URL_BASE", + "REMOTE_HOST_HEADERS", + "PROXY_IP_ALLOWED_LIST", + "ALLOW_JINJA_IN_EXTRA_VARS", + "AWX_ISOLATION_BASE_PATH", + "AWX_RUNNER_KEEPALIVE_SECONDS", + "GALAXY_TASK_ENV", + "PROJECT_UPDATE_VVV", + "STDOUT_MAX_BYTES_DISPLAY", + "EVENT_STDOUT_MAX_BYTES_DISPLAY", + "SCHEDULE_MAX_JOBS", + "AUTOMATION_ANALYTICS_LAST_GATHER", + "CLEANUP_HOST_METRICS_LAST_TS", + "HOST_METRIC_SUMMARY_TASK_LAST_TS", + "SESSION_COOKIE_AGE", + "SESSIONS_PER_USER", + "DISABLE_LOCAL_AUTH", + "AUTH_BASIC_ENABLED", + "MAX_UI_JOB_EVENTS", + "UI_LIVE_UPDATES_ENABLED", + "SOCIAL_AUTH_SAML_SP_PUBLIC_CERT", + "SOCIAL_AUTH_SAML_SP_PRIVATE_KEY", + "SOCIAL_AUTH_SAML_ORG_INFO", + "SOCIAL_AUTH_SAML_TECHNICAL_CONTACT", + "SOCIAL_AUTH_SAML_SUPPORT_CONTACT" + ], + "type": "object" + }, + "SystemJob": { + "properties": { + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "event_processing_finished": { + "description": "Indicates whether all of the events generated by this unified job have been saved to the database.", + "readOnly": true, + "title": "Event processing finished", + "type": "boolean" + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "execution_node": { + "description": "The node the job executed on.", + "minLength": 1, + "readOnly": true, + "title": "Execution node", + "type": "string" + }, + "extra_vars": { + "default": "", + "title": "Extra vars", + "type": "string", + "x-nullable": true + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "job_args": { + "minLength": 1, + "readOnly": true, + "title": "Job args", + "type": "string" + }, + "job_cwd": { + "minLength": 1, + "readOnly": true, + "title": "Job cwd", + "type": "string" + }, + "job_env": { + "readOnly": true, + "title": "job_env", + "type": "object" + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "job_type": { + "default": "", + "enum": [ + "cleanup_jobs", + "cleanup_activitystream", + "cleanup_sessions", + "cleanup_tokens" + ], + "title": "Job type", + "type": "string", +
"x-nullable": true + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "result_stdout": { + "readOnly": true, + "title": "Result stdout", + "type": "string" + }, + "result_traceback": { + "minLength": 1, + "readOnly": true, + "title": "Result traceback", + "type": "string" + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "system_job_template": { + "title": "System job template", + "type": "string", + "x-nullable": true + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "SystemJobCancel": { + "properties": { + "can_cancel": { + "readOnly": true, + "title": "Can cancel", + "type": "boolean" + } + }, + "type": "object" + }, + "SystemJobEvent": { + "properties": { + "changed": { + "readOnly": true, + "title": "Changed", + "type": "string" + }, + "counter": { + "minimum": 0, + "readOnly": true, + "title": "Counter", + "type": "integer" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "end_line": { + "minimum": 0, + "readOnly": true, + "title": "End line", + "type": "integer" + }, + "event": { + "readOnly": true, + "title": "Event", + "type": "string" + }, + "event_data": { + "default": {}, + "title": "Event data", + "type": "object" + }, + "event_display": { + "minLength": 1, + "readOnly": true, + "title": "Event display", + "type": "string" + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "start_line": { + "minimum": 0, + "readOnly": true, + "title": "Start line", + "type": "integer" + }, + "stdout": { + "minLength": 1, + "readOnly": true, + "title": "Stdout", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "system_job": { + "readOnly": true, + "title": "System job", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { +
"readOnly": true, + "title": "Url", + "type": "string" + }, + "uuid": { + "minLength": 1, + "readOnly": true, + "title": "Uuid", + "type": "string" + }, + "verbosity": { + "minimum": 0, + "readOnly": true, + "title": "Verbosity", + "type": "integer" + } + }, + "type": "object" + }, + "SystemJobList": { + "properties": { + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "execution_node": { + "description": "The node the job executed on.", + "minLength": 1, + "readOnly": true, + "title": "Execution node", + "type": "string" + }, + "extra_vars": { + "default": "", + "title": "Extra vars", + "type": "string", + "x-nullable": true + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "job_type": { + "default": "", + "enum": [ + "cleanup_jobs", + "cleanup_activitystream", + "cleanup_sessions", + "cleanup_tokens" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "result_stdout": { + "readOnly": true, + "title": "Result stdout", + "type": "string" + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "system_job_template": { + "title": "System job template", + "type": "string", + "x-nullable": true + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", +
"type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "SystemJobTemplate": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "job_type": { + "default": "", + "enum": [ + "cleanup_jobs", + "cleanup_activitystream", + "cleanup_sessions", + "cleanup_tokens" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "last_job_failed": { + "readOnly": true, + "title": "Last job failed", + "type": "boolean" + }, + "last_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Last job run", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "next_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Next job run", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled", + "never updated", + "ok", + "missing", + "none", + "updating" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "Team": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "organization": { + "title": "Organization", + "type": "integer" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name", + "organization" + ], + "type": "object" + }, + "TeamCredentialSerializerCreate": { + "properties": { + "cloud": { + "readOnly": true, + "title": "Cloud", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential_type": { + "description": "Specify the type of credential you want to create.
Refer to the documentation for details on each type.", + "title": "Credential Type", + "type": "integer" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inputs": { + "default": {}, + "description": "Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax.", + "title": "Inputs", + "type": "object" + }, + "kind": { + "readOnly": true, + "title": "Kind", + "type": "string" + }, + "kubernetes": { + "readOnly": true, + "title": "Kubernetes", + "type": "string" + }, + "managed": { + "readOnly": true, + "title": "Managed", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "team": { + "description": "Write-only field used to add team to owner role. If provided, do not give either user or organization. Only valid for creation.", + "title": "Team", + "type": "integer", + "x-nullable": true + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name", + "credential_type" + ], + "type": "object" + }, + "UnifiedJobList": { + "properties": { + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "controller_node": { + "description": "The instance that managed the execution environment.", + "minLength": 1, + "readOnly": true, + "title": "Controller node", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "execution_node": { + "description": "The node the job executed on.", + "minLength": 1, + "readOnly": true, + "title": "Execution node", + "type": "string" + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": 
"string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "UnifiedJobStdout": { + "properties": { + "result_stdout": { + "readOnly": true, + "title": "Result stdout", + "type": "string" + } + }, + "type": "object" + }, + "UnifiedJobTemplate": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "last_job_failed": { + "readOnly": true, + "title": "Last job failed", + "type": "boolean" + }, + "last_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Last job run", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "next_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Next job run", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled", + "never updated", + "ok", + "missing", + "none", + "updating" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "User": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "email": { + "maxLength": 254, + "title": "Email address", + "type": "string", + "x-nullable": true + }, + "external_account": { + "description": "Set if the account is managed by an external service", + "readOnly": true, + "title": "External account", + "type": "string" + }, + 
"first_name": { + "maxLength": 150, + "title": "First name", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "is_superuser": { + "default": false, + "description": "Designates that this user has all permissions without explicitly assigning them.", + "title": "Superuser status", + "type": "boolean", + "x-nullable": true + }, + "is_system_auditor": { + "default": false, + "title": "Is system auditor", + "type": "boolean" + }, + "last_login": { + "format": "date-time", + "readOnly": true, + "title": "Last login", + "type": "string", + "x-nullable": true + }, + "last_name": { + "maxLength": 150, + "title": "Last name", + "type": "string", + "x-nullable": true + }, + "ldap_dn": { + "minLength": 1, + "readOnly": true, + "title": "Ldap dn", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "password": { + "default": "", + "description": "Field used to change the password.", + "minLength": 1, + "title": "Password", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "username": { + "description": "Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.", + "maxLength": 150, + "minLength": 1, + "pattern": "^[\\w.@+-]+$", + "title": "Username", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "username" + ], + "type": "object" + }, + "UserAuthorizedToken": { + "properties": { + "application": { + "title": "Application", + "type": "integer" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "expires": { + "format": "date-time", + "readOnly": true, + "title": "Expires", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "Id", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "refresh_token": { + "readOnly": true, + "title": "Refresh token", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scope": { + "default": "write", + "description": "Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write'].", + "title": "Scope", + "type": "string", + "x-nullable": true + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "token": { + "readOnly": true, + "title": "Token", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "user": { + "description": "The user representing the token owner", + "readOnly": true, + "title": "User", + "type": "integer" + } + }, + "required": [ + "application" + ], + "type": "object" + }, + "UserCredentialSerializerCreate": { + "properties": { + "cloud": { + "readOnly": true, + "title": "Cloud", + "type": "string" + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "credential_type": { + "description": "Specify the type of credential you want to create. 
Refer to the documentation for details on each type.", + "title": "Credential Type", + "type": "integer" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inputs": { + "default": {}, + "description": "Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax.", + "title": "Inputs", + "type": "object" + }, + "kind": { + "readOnly": true, + "title": "Kind", + "type": "string" + }, + "kubernetes": { + "readOnly": true, + "title": "Kubernetes", + "type": "string" + }, + "managed": { + "readOnly": true, + "title": "Managed", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "user": { + "description": "Write-only field used to add user to owner role. If provided, do not give either team or organization. Only valid for creation.", + "title": "User", + "type": "integer", + "x-nullable": true + } + }, + "required": [ + "name", + "credential_type" + ], + "type": "object" + }, + "UserPersonalToken": { + "properties": { + "application": { + "readOnly": true, + "title": "Application", + "type": "integer", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "expires": { + "format": "date-time", + "readOnly": true, + "title": "Expires", + "type": "string" + }, + "id": { + "readOnly": true, + "title": "Id", + "type": "integer" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "refresh_token": { + "readOnly": true, + "title": "Refresh token", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scope": { + "default": "write", + "description": "Allowed scopes, further restricts user's permissions. 
Must be a simple space-separated string with allowed scopes ['read', 'write'].", + "title": "Scope", + "type": "string", + "x-nullable": true + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "token": { + "readOnly": true, + "title": "Token", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "user": { + "description": "The user representing the token owner", + "readOnly": true, + "title": "User", + "type": "integer" + } + }, + "type": "object" + }, + "WorkflowApproval": { + "properties": { + "approval_expiration": { + "readOnly": true, + "title": "Approval expiration", + "type": "string" + }, + "can_approve_or_deny": { + "readOnly": true, + "title": "Can approve or deny", + "type": "string" + }, + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "event_processing_finished": { + "description": "Indicates whether all of the events generated by this unified job have been saved to the database.", + "readOnly": true, + "title": "Event processing finished", + "type": "boolean" + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "job_args": { + "minLength": 1, + "readOnly": true, + "title": "Job args", + "type": "string" + }, + "job_cwd": { + "minLength": 1, + "readOnly": true, + "title": "Job cwd", + "type": "string" + }, + "job_env": { + "readOnly": true, + "title": "job_env", + "type": "object" + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "result_traceback": { + "minLength": 1, + "readOnly": true, + "title": "Result traceback", + "type": "string" + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + 
"title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timed_out": { + "readOnly": true, + "title": "Timed out", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "WorkflowApprovalList": { + "properties": { + "approval_expiration": { + "readOnly": true, + "title": "Approval expiration", + "type": "string" + }, + "can_approve_or_deny": { + "readOnly": true, + "title": "Can approve or deny", + "type": "string" + }, + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": 
true, + "title": "Summary fields", + "type": "string" + }, + "timed_out": { + "readOnly": true, + "title": "Timed out", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "WorkflowApprovalTemplate": { + "properties": { + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "last_job_failed": { + "readOnly": true, + "title": "Last job failed", + "type": "boolean" + }, + "last_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Last job run", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "next_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Next job run", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled", + "never updated", + "ok", + "missing", + "none", + "updating" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "default": 0, + "description": "The amount of time (in seconds) before the approval node expires and fails.", + "title": "Timeout", + "type": "integer" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "WorkflowApprovalView": { + "properties": {}, + "type": "object" + }, + "WorkflowJob": { + "properties": { + "allow_simultaneous": { + "default": false, + "title": "Allow simultaneous", + "type": "boolean", + "x-nullable": true + }, + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + "title": "Elapsed", + "type": "string" + }, + "extra_vars": { + "default": "", + "title": "Extra vars", + "type": "string", + "x-nullable": true + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": 
"The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory": { + "description": "Inventory applied as a prompt, assuming job template prompts for inventory", + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "is_sliced_job": { + "default": false, + "title": "Is sliced job", + "type": "boolean", + "x-nullable": true + }, + "job_args": { + "minLength": 1, + "readOnly": true, + "title": "Job args", + "type": "string" + }, + "job_cwd": { + "minLength": 1, + "readOnly": true, + "title": "Job cwd", + "type": "string" + }, + "job_env": { + "readOnly": true, + "title": "job_env", + "type": "object" + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "job_tags": { + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_template": { + "description": "If automatically created for a sliced job run, the job template the workflow job was created from.", + "title": "Job template", + "type": "string", + "x-nullable": true + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "limit": { + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "result_traceback": { + "minLength": 1, + "readOnly": true, + "title": "Result traceback", + "type": "string" + }, + "scm_branch": { + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "skip_tags": { + "title": "Skip tags", + "type": "string", + "x-nullable": true + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "webhook_credential": { + "description": "Personal Access Token for posting back the status to the service API", + "title": "Webhook credential", + "type": "integer", + "x-nullable": true + }, + "webhook_guid": { + "description": "Unique identifier of the event that triggered this webhook", + "maxLength": 128, + "title": "Webhook guid", + "type": "string", + "x-nullable": true + }, + "webhook_service": { + "description": "Service that webhook requests will be accepted from", + "enum": [ + "github", + 
"gitlab" + ], + "title": "Webhook service", + "type": "string", + "x-nullable": true + }, + "work_unit_id": { + "description": "The Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + }, + "workflow_job_template": { + "title": "Workflow job template", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "WorkflowJobCancel": { + "properties": { + "can_cancel": { + "readOnly": true, + "title": "Can cancel", + "type": "boolean" + } + }, + "type": "object" + }, + "WorkflowJobLaunch": { + "properties": { + "ask_inventory_on_launch": { + "readOnly": true, + "title": "Ask inventory on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_labels_on_launch": { + "readOnly": true, + "title": "Ask labels on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_limit_on_launch": { + "readOnly": true, + "title": "Ask limit on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_scm_branch_on_launch": { + "readOnly": true, + "title": "Ask scm branch on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_skip_tags_on_launch": { + "readOnly": true, + "title": "Ask skip tags on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_tags_on_launch": { + "readOnly": true, + "title": "Ask tags on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_variables_on_launch": { + "readOnly": true, + "title": "Ask variables on launch", + "type": "boolean", + "x-nullable": true + }, + "can_start_without_user_input": { + "readOnly": true, + "title": "Can start without user input", + "type": "boolean" + }, + "defaults": { + "readOnly": true, + "title": "Defaults", + "type": "string" + }, + "extra_vars": { + "title": "Extra vars", + "type": "string" + }, + "inventory": { + "title": "Inventory", + "type": "integer" + }, + "job_tags": { + "title": "Job tags", + "type": "string" + }, + "labels": { + "items": { + "type": "integer" + }, + "type": "array", + "uniqueItems": true + }, + "limit": { + "title": "Limit", + "type": "string" + }, + "node_prompts_rejected": { + "readOnly": true, + "title": "Node prompts rejected", + "type": "string" + }, + "node_templates_missing": { + "readOnly": true, + "title": "Node templates missing", + "type": "string" + }, + "scm_branch": { + "title": "Scm branch", + "type": "string" + }, + "skip_tags": { + "title": "Skip tags", + "type": "string" + }, + "survey_enabled": { + "readOnly": true, + "title": "Survey enabled", + "type": "string" + }, + "variables_needed_to_start": { + "readOnly": true, + "title": "Variables needed to start", + "type": "string" + }, + "workflow_job_template_data": { + "readOnly": true, + "title": "Workflow job template data", + "type": "string" + } + }, + "type": "object" + }, + "WorkflowJobList": { + "properties": { + "allow_simultaneous": { + "default": false, + "title": "Allow simultaneous", + "type": "boolean", + "x-nullable": true + }, + "canceled_on": { + "description": "The date and time when the cancel request was sent.", + "format": "date-time", + "readOnly": true, + "title": "Canceled on", + "type": "string", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "elapsed": { + "description": "Elapsed time in seconds that the job ran.", + "format": "decimal", + "readOnly": true, + 
"title": "Elapsed", + "type": "string" + }, + "extra_vars": { + "default": "", + "title": "Extra vars", + "type": "string", + "x-nullable": true + }, + "failed": { + "readOnly": true, + "title": "Failed", + "type": "boolean" + }, + "finished": { + "description": "The date and time the job finished execution.", + "format": "date-time", + "readOnly": true, + "title": "Finished", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory": { + "description": "Inventory applied as a prompt, assuming job template prompts for inventory", + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "is_sliced_job": { + "default": false, + "title": "Is sliced job", + "type": "boolean", + "x-nullable": true + }, + "job_explanation": { + "description": "A status field to indicate the state of the job if it wasn't able to run and capture stdout", + "minLength": 1, + "readOnly": true, + "title": "Job explanation", + "type": "string" + }, + "job_tags": { + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_template": { + "description": "If automatically created for a sliced job run, the job template the workflow job was created from.", + "title": "Job template", + "type": "string", + "x-nullable": true + }, + "launch_type": { + "enum": [ + "manual", + "relaunch", + "callback", + "scheduled", + "dependency", + "workflow", + "webhook", + "sync", + "scm" + ], + "readOnly": true, + "title": "Launch type", + "type": "string" + }, + "launched_by": { + "readOnly": true, + "title": "Launched by", + "type": "string" + }, + "limit": { + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "skip_tags": { + "title": "Skip tags", + "type": "string", + "x-nullable": true + }, + "started": { + "description": "The date and time the job was queued for starting.", + "format": "date-time", + "readOnly": true, + "title": "Started", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "readOnly": true, + "title": "unified job template", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "webhook_credential": { + "description": "Personal Access Token for posting back the status to the service API", + "title": "Webhook credential", + "type": "integer", + "x-nullable": true + }, + "webhook_guid": { + "description": "Unique identifier of the event that triggered this webhook", + "maxLength": 128, + "title": "Webhook guid", + "type": "string", + "x-nullable": true + }, + "webhook_service": { + "description": "Service that webhook requests will be accepted from", + "enum": [ + "github", + "gitlab" + ], + "title": "Webhook service", + "type": "string", + "x-nullable": true + }, + "work_unit_id": { + "description": "The 
Receptor work unit ID associated with this job.", + "minLength": 1, + "readOnly": true, + "title": "Work unit id", + "type": "string", + "x-nullable": true + }, + "workflow_job_template": { + "title": "Workflow job template", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "WorkflowJobNodeDetail": { + "properties": { + "all_parents_must_converge": { + "default": false, + "description": "If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node", + "title": "All parents must converge", + "type": "boolean", + "x-nullable": true + }, + "always_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "diff_mode": { + "title": "Diff mode", + "type": "boolean", + "x-nullable": true + }, + "do_not_run": { + "default": false, + "description": "Indicates that a job will not be created when True. Workflow runtime semantics will mark this True if the node is in a path that will decidedly not run. A value of False does not guarantee that the node will run.", + "title": "Do not run", + "type": "boolean", + "x-nullable": true + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "extra_data": { + "default": {}, + "title": "Extra data", + "type": "object" + }, + "failure_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "forks": { + "minimum": 0, + "title": "Forks", + "type": "integer", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "identifier": { + "description": "An identifier corresponding to the workflow job template node that this node was created from.", + "maxLength": 512, + "title": "Identifier", + "type": "string", + "x-nullable": true + }, + "inventory": { + "description": "Inventory applied as a prompt, assuming job template prompts for inventory", + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "job": { + "title": "Job", + "type": "integer", + "x-nullable": true + }, + "job_slice_count": { + "minimum": 0, + "title": "Job slice count", + "type": "integer", + "x-nullable": true + }, + "job_tags": { + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_type": { + "enum": [ + "run", + "check" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "limit": { + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "skip_tags": { + "title": "Skip tags", + "type": "string", + "x-nullable": true + }, + "success_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "title": "Timeout", + "type": "integer", + "x-nullable": true + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "title": "Unified job template", + "type": "integer", + "x-nullable": true + }, + "url": {
"readOnly": true, + "title": "Url", + "type": "string" + }, + "verbosity": { + "enum": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "title": "Verbosity", + "type": "string", + "x-nullable": true + }, + "workflow_job": { + "title": "Workflow job", + "type": "string", + "x-nullable": true + } + }, + "type": "object" + }, + "WorkflowJobNodeList": { + "properties": { + "all_parents_must_converge": { + "default": false, + "description": "If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node", + "title": "All parents must converge", + "type": "boolean", + "x-nullable": true + }, + "always_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "diff_mode": { + "title": "Diff mode", + "type": "boolean", + "x-nullable": true + }, + "do_not_run": { + "default": false, + "description": "Indicates that a job will not be created when True. Workflow runtime semantics will mark this True if the node is in a path that will decidedly not be ran. A value of False means the node may not run.", + "title": "Do not run", + "type": "boolean", + "x-nullable": true + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "extra_data": { + "default": {}, + "title": "Extra data", + "type": "object" + }, + "failure_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "forks": { + "minimum": 0, + "title": "Forks", + "type": "integer", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "identifier": { + "description": "An identifier coresponding to the workflow job template node that this node was created from.", + "maxLength": 512, + "title": "Identifier", + "type": "string", + "x-nullable": true + }, + "inventory": { + "description": "Inventory applied as a prompt, assuming job template prompts for inventory", + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "job": { + "title": "Job", + "type": "integer", + "x-nullable": true + }, + "job_slice_count": { + "minimum": 0, + "title": "Job slice count", + "type": "integer", + "x-nullable": true + }, + "job_tags": { + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_type": { + "enum": [ + "run", + "check" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "limit": { + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "skip_tags": { + "title": "Skip tags", + "type": "string", + "x-nullable": true + }, + "success_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "title": "Timeout", + "type": "integer", + "x-nullable": true + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "title": "Unified job template", + "type": "integer", + "x-nullable": true + }, + "url": { + "readOnly": true, + 
"title": "Url", + "type": "string" + }, + "verbosity": { + "enum": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "title": "Verbosity", + "type": "string", + "x-nullable": true + }, + "workflow_job": { + "title": "Workflow job", + "type": "string", + "x-nullable": true + } + }, + "type": "object" + }, + "WorkflowJobTemplate": { + "properties": { + "allow_simultaneous": { + "default": false, + "title": "Allow simultaneous", + "type": "boolean", + "x-nullable": true + }, + "ask_inventory_on_launch": { + "default": false, + "title": "Ask inventory on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_labels_on_launch": { + "default": false, + "title": "Ask labels on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_limit_on_launch": { + "default": false, + "title": "Ask limit on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_scm_branch_on_launch": { + "default": false, + "title": "Ask scm branch on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_skip_tags_on_launch": { + "default": false, + "title": "Ask skip tags on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_tags_on_launch": { + "default": false, + "title": "Ask tags on launch", + "type": "boolean", + "x-nullable": true + }, + "ask_variables_on_launch": { + "default": false, + "title": "Ask variables on launch", + "type": "boolean", + "x-nullable": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "extra_vars": { + "default": "", + "title": "Extra vars", + "type": "string", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "inventory": { + "description": "Inventory applied as a prompt, assuming job template prompts for inventory", + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "job_tags": { + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "last_job_failed": { + "readOnly": true, + "title": "Last job failed", + "type": "boolean" + }, + "last_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Last job run", + "type": "string", + "x-nullable": true + }, + "limit": { + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "next_job_run": { + "format": "date-time", + "readOnly": true, + "title": "Next job run", + "type": "string", + "x-nullable": true + }, + "organization": { + "description": "The organization used to determine access to this template.", + "title": "Organization", + "type": "integer", + "x-nullable": true + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "skip_tags": { + "title": "Skip tags", + "type": "string", + "x-nullable": true + }, + "status": { + "enum": [ + "new", + "pending", + "waiting", + "running", + "successful", + "failed", + "error", + "canceled", + "never updated", + "ok", + "missing", + "none", + "updating" + ], + "readOnly": true, + "title": "Status", + "type": "string" + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "survey_enabled": { + "default": false, + "title": "Survey enabled", + "type": 
"boolean", + "x-nullable": true + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "webhook_credential": { + "description": "Personal Access Token for posting back the status to the service API", + "title": "Webhook credential", + "type": "integer", + "x-nullable": true + }, + "webhook_service": { + "description": "Service that webhook requests will be accepted from", + "enum": [ + "github", + "gitlab" + ], + "title": "Webhook service", + "type": "string", + "x-nullable": true + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "WorkflowJobTemplateNode": { + "properties": { + "all_parents_must_converge": { + "default": false, + "description": "If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node", + "title": "All parents must converge", + "type": "boolean", + "x-nullable": true + }, + "always_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "diff_mode": { + "title": "Diff mode", + "type": "boolean", + "x-nullable": true + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "extra_data": { + "default": {}, + "title": "Extra data", + "type": "object" + }, + "failure_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "forks": { + "minimum": 0, + "title": "Forks", + "type": "integer", + "x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "identifier": { + "default": "00000000-0000-0000-0000-000000000000", + "description": "An identifier for this node that is unique within its workflow. 
It is copied to workflow job nodes corresponding to this node.", + "maxLength": 512, + "minLength": 1, + "title": "Identifier", + "type": "string", + "x-nullable": true + }, + "inventory": { + "description": "Inventory applied as a prompt, assuming job template prompts for inventory", + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "job_slice_count": { + "minimum": 0, + "title": "Job slice count", + "type": "integer", + "x-nullable": true + }, + "job_tags": { + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_type": { + "enum": [ + "run", + "check" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "limit": { + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "skip_tags": { + "title": "Skip tags", + "type": "string", + "x-nullable": true + }, + "success_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "title": "Timeout", + "type": "integer", + "x-nullable": true + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "title": "Unified job template", + "type": "integer", + "x-nullable": true + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "verbosity": { + "enum": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "title": "Verbosity", + "type": "string", + "x-nullable": true + }, + "workflow_job_template": { + "title": "Workflow job template", + "type": "string" + } + }, + "required": [ + "workflow_job_template" + ], + "type": "object" + }, + "WorkflowJobTemplateNodeCreateApproval": { + "properties": { + "description": { + "default": "", + "title": "Description", + "type": "string", + "x-nullable": true + }, + "name": { + "maxLength": 512, + "minLength": 1, + "title": "Name", + "type": "string", + "x-nullable": true + }, + "timeout": { + "default": 0, + "description": "The amount of time (in seconds) before the approval node expires and fails.", + "title": "Timeout", + "type": "integer" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "WorkflowJobTemplateNodeDetail": { + "properties": { + "all_parents_must_converge": { + "default": false, + "description": "If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node", + "title": "All parents must converge", + "type": "boolean", + "x-nullable": true + }, + "always_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "created": { + "readOnly": true, + "title": "Created", + "type": "string" + }, + "diff_mode": { + "title": "Diff mode", + "type": "boolean", + "x-nullable": true + }, + "execution_environment": { + "description": "The container image to be used for execution.", + "title": "Execution environment", + "type": "integer", + "x-nullable": true + }, + "extra_data": { + "default": {}, + "title": "Extra data", + "type": "object" + }, + "failure_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "forks": { + "minimum": 0, + "title": "Forks", + "type": "integer", + 
"x-nullable": true + }, + "id": { + "readOnly": true, + "title": "ID", + "type": "integer" + }, + "identifier": { + "default": "00000000-0000-0000-0000-000000000000", + "description": "An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node.", + "maxLength": 512, + "minLength": 1, + "title": "Identifier", + "type": "string", + "x-nullable": true + }, + "inventory": { + "description": "Inventory applied as a prompt, assuming job template prompts for inventory", + "title": "Inventory", + "type": "integer", + "x-nullable": true + }, + "job_slice_count": { + "minimum": 0, + "title": "Job slice count", + "type": "integer", + "x-nullable": true + }, + "job_tags": { + "title": "Job tags", + "type": "string", + "x-nullable": true + }, + "job_type": { + "enum": [ + "run", + "check" + ], + "title": "Job type", + "type": "string", + "x-nullable": true + }, + "limit": { + "title": "Limit", + "type": "string", + "x-nullable": true + }, + "modified": { + "readOnly": true, + "title": "Modified", + "type": "string" + }, + "related": { + "readOnly": true, + "title": "Related", + "type": "string" + }, + "scm_branch": { + "title": "Scm branch", + "type": "string", + "x-nullable": true + }, + "skip_tags": { + "title": "Skip tags", + "type": "string", + "x-nullable": true + }, + "success_nodes": { + "items": { + "type": "integer" + }, + "readOnly": true, + "type": "array", + "uniqueItems": true + }, + "summary_fields": { + "readOnly": true, + "title": "Summary fields", + "type": "string" + }, + "timeout": { + "title": "Timeout", + "type": "integer", + "x-nullable": true + }, + "type": { + "readOnly": true, + "title": "Type", + "type": "string" + }, + "unified_job_template": { + "title": "Unified job template", + "type": "integer", + "x-nullable": true + }, + "url": { + "readOnly": true, + "title": "Url", + "type": "string" + }, + "verbosity": { + "enum": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "title": "Verbosity", + "type": "string", + "x-nullable": true + }, + "workflow_job_template": { + "title": "Workflow job template", + "type": "string" + } + }, + "required": [ + "workflow_job_template" + ], + "type": "object" + } + }, + "host": null, + "info": { + "contact": { + "email": "contact@snippets.local" + }, + "description": "Test description", + "license": { + "name": "BSD License" + }, + "termsOfService": "https://www.google.com/policies/terms/", + "title": "Snippets API", + "version": "v1" + }, + "paths": { + "/api/": { + "get": { + "description": "", + "operationId": "api_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "List supported API versions", + "tags": [ + "api" + ] + }, + "parameters": [] + }, + "/api/debug/": { + "get": { + "description": "", + "operationId": "api_debug_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "List of available debug urls", + "tags": [ + "api" + ] + }, + "parameters": [] + }, + "/api/debug/dependency_manager/": { + "get": { + "description": "", + "operationId": "api_debug_dependency_manager_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/debug/dependency_manager/", + "tags": [ + "api" + ] + }, + "parameters": [] + }, + "/api/debug/task_manager/": { + "get": { + "description": "", + "operationId": "api_debug_task_manager_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description 
for get on /api/debug/task_manager/", + "tags": [ + "api" + ] + }, + "parameters": [] + }, + "/api/debug/workflow_manager/": { + "get": { + "description": "", + "operationId": "api_debug_workflow_manager_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/debug/workflow_manager/", + "tags": [ + "api" + ] + }, + "parameters": [] + }, + "/api/o/": { + "get": { + "description": "Note that endpoints other than `/api/o/authorize/` are not meant to be used in browsers and do not\nsupport HTTP GET. The endpoints here strictly follow\n[RFC specs for OAuth2](https://tools.ietf.org/html/rfc6749), so please use that for detailed\nreference. Note that the AWX net location defaults to `http://localhost:8013` in these examples:\n\n\n## Create Token for an Application using Authorization code grant type\nGiven an application \"AuthCodeApp\" of grant type `authorization-code`, \nfrom the client app, the user makes a GET to the Authorize endpoint with \n\n* `response_type`\n* `client_id`\n* `redirect_uris`\n* `scope` \n\nAWX will respond with the authorization `code` and `state`\nto the redirect_uri specified in the application. The client application will then make a POST to the\n`api/o/token/` endpoint on AWX with\n\n* `code`\n* `client_id`\n* `client_secret`\n* `grant_type`\n* `redirect_uri`\n\nAWX will respond with the `access_token`, `token_type`, `refresh_token`, and `expires_in`. For more\ninformation on testing this flow, refer to [django-oauth-toolkit](http://django-oauth-toolkit.readthedocs.io/en/latest/tutorial/tutorial_01.html#test-your-authorization-server).\n\n\n## Create Token for an Application using Password grant type\n\nLogging in is not required for the `password` grant type, so a simple `curl` can be used to acquire a personal access token\nvia `/api/o/token/` with \n\n* `grant_type`: Required to be \"password\"\n* `username`\n* `password`\n* `client_id`: Associated application must have grant_type \"password\"\n* `client_secret`\n\nFor example:\n\n```bash\ncurl -X POST \\\n -H \"Content-Type: application/x-www-form-urlencoded\" \\\n -d \"grant_type=password&username=&password=&scope=read\" \\\n -u \"gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569e\nIaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo\" \\\n http://localhost:8013/api/o/token/ -i\n```\nIn the above POST request, the parameters `username` and `password` are the username and password of the related\nAWX user of the underlying application, and the authentication information is of the format\n`client_id:client_secret`, where `client_id` and `client_secret` are the corresponding fields of the\nunderlying application.\n\nUpon success, the access token, refresh token, and other information are returned in the response body as JSON", + "operationId": "api_o_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "This page lists OAuth 2 utility endpoints used for authorization, token refresh and revoke.", + "tags": [ + "api" + ] + }, + "parameters": [] + }, + "/api/v2/": { + "get": { + "description": "", + "operationId": "api_read", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "activity_stream": "/api/v2/activity_stream/", + "ad_hoc_commands": "/api/v2/ad_hoc_commands/", + "analytics": "/api/v2/analytics/", + "applications": "/api/v2/applications/", + "bulk": "/api/v2/bulk/", + "config": "/api/v2/config/", +
"constructed_inventory": "/api/v2/constructed_inventories/", + "credential_input_sources": "/api/v2/credential_input_sources/", + "credential_types": "/api/v2/credential_types/", + "credentials": "/api/v2/credentials/", + "dashboard": "/api/v2/dashboard/", + "execution_environments": "/api/v2/execution_environments/", + "groups": "/api/v2/groups/", + "host_metric_summary_monthly": "/api/v2/host_metric_summary_monthly/", + "host_metrics": "/api/v2/host_metrics/", + "hosts": "/api/v2/hosts/", + "instance_groups": "/api/v2/instance_groups/", + "instances": "/api/v2/instances/", + "inventory": "/api/v2/inventories/", + "inventory_sources": "/api/v2/inventory_sources/", + "inventory_updates": "/api/v2/inventory_updates/", + "job_templates": "/api/v2/job_templates/", + "jobs": "/api/v2/jobs/", + "labels": "/api/v2/labels/", + "me": "/api/v2/me/", + "mesh_visualizer": "/api/v2/mesh_visualizer/", + "metrics": "/api/v2/metrics/", + "notification_templates": "/api/v2/notification_templates/", + "notifications": "/api/v2/notifications/", + "organizations": "/api/v2/organizations/", + "ping": "/api/v2/ping/", + "project_updates": "/api/v2/project_updates/", + "projects": "/api/v2/projects/", + "roles": "/api/v2/roles/", + "schedules": "/api/v2/schedules/", + "settings": "/api/v2/settings/", + "system_job_templates": "/api/v2/system_job_templates/", + "system_jobs": "/api/v2/system_jobs/", + "teams": "/api/v2/teams/", + "tokens": "/api/v2/tokens/", + "unified_job_templates": "/api/v2/unified_job_templates/", + "unified_jobs": "/api/v2/unified_jobs/", + "users": "/api/v2/users/", + "workflow_approvals": "/api/v2/workflow_approvals/", + "workflow_job_nodes": "/api/v2/workflow_job_nodes/", + "workflow_job_template_nodes": "/api/v2/workflow_job_template_nodes/", + "workflow_job_templates": "/api/v2/workflow_job_templates/", + "workflow_jobs": "/api/v2/workflow_jobs/" + } + } + } + }, + "summary": "List top level resources", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/activity_stream/": { + "get": { + "description": "activity streams.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). 
(choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=timestamp\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-timestamp\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=timestamp,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/activity_stream/{id}/": { + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. 
The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)", + "operationId": "api_activity_stream_read", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "action_node": "awx", + "changes": { + "action": "associate", + "object1": "user", + "object1_pk": 2, + "object2": "inventory", + "object2_pk": 23, + "relationship": "awx.main.models.rbac.Role_members" + }, + "id": 10, + "object1": "user", + "object2": "inventory", + "object_association": "role", + "object_type": "inventory", + "operation": "associate", + "related": { + "inventory": [ + "/api/v2/inventories/1/" + ], + "role": [ + "/api/v2/roles/23/" + ], + "user": [ + "/api/v2/users/2/" + ] + }, + "summary_fields": { + "inventory": [ + { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "inv1", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + } + ], + "role": [ + { + "id": 23, + "role_field": "read_role" + } + ], + "user": [ + { + "first_name": "", + "id": 2, + "last_name": "", + "username": "test" + } + ] + }, + "timestamp": "2018-02-01T08:00:00.000000Z", + "type": "activity_stream", + "url": "/api/v2/activity_stream/10/" + } + }, + "schema": { + "$ref": "#/definitions/ActivityStream" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single activity stream", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/ad_hoc_command_events/{id}/": { + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this ad hoc command event. (integer)\n* `type`: Data type for this ad hoc command event. (choice)\n* `url`: URL for this ad hoc command event. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this ad hoc command event was created. 
(datetime)\n* `modified`: Timestamp when this ad hoc command event was last modified. (datetime)\n* `ad_hoc_command`: (id)\n* `event`: (choice)\n - `runner_on_failed`: Host Failed\n - `runner_on_ok`: Host OK\n - `runner_on_unreachable`: Host Unreachable\n - `runner_on_skipped`: Host Skipped\n - `debug`: Debug\n - `verbose`: Verbose\n - `deprecated`: Deprecated\n - `warning`: Warning\n - `system_warning`: System Warning\n - `error`: Error\n* `counter`: (integer)\n* `event_display`: (string)\n* `event_data`: (json)\n* `failed`: (boolean)\n* `changed`: (boolean)\n* `uuid`: (string)\n* `host`: (id)\n* `host_name`: (string)\n* `stdout`: (string)\n* `start_line`: (integer)\n* `end_line`: (integer)\n* `verbosity`: (integer)", + "operationId": "api_ad_hoc_command_events_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/AdHocCommandEvent" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single ad hoc command event", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/ad_hoc_commands/": { + "get": { + "description": "ad hoc commands.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of ad hoc commands\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more ad hoc command records. \n\n## Results\n\nEach ad hoc command data structure includes the following fields:\n\n* `id`: Database ID for this ad hoc command. (integer)\n* `type`: Data type for this ad hoc command. (choice)\n* `url`: URL for this ad hoc command. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this ad hoc command was created. (datetime)\n* `modified`: Timestamp when this ad hoc command was last modified. (datetime)\n* `name`: Name of this ad hoc command. (string)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. 
(string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `limit`: (string)\n* `credential`: (id)\n* `module_name`: (choice)\n - `command`\n - `shell`\n - `yum`\n - `apt`\n - `apt_key`\n - `apt_repository`\n - `apt_rpm`\n - `service`\n - `group`\n - `user`\n - `mount`\n - `ping`\n - `selinux`\n - `setup`\n - `win_ping`\n - `win_service`\n - `win_updates`\n - `win_group`\n - `win_user`\n* `module_args`: (string)\n* `forks`: (integer)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (string)\n* `become_enabled`: (boolean)\n* `diff_mode`: (boolean)\n\n\n\n## Sorting\n\nTo specify that ad hoc commands are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_ad_hoc_commands_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/AdHocCommandList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "ad hoc commands.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of ad hoc commands\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more ad hoc command records. 
\n\n## Results\n\nEach ad hoc command data structure includes the following fields:\n\n* `id`: Database ID for this ad hoc command. (integer)\n* `type`: Data type for this ad hoc command. (choice)\n* `url`: URL for this ad hoc command. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this ad hoc command was created. (datetime)\n* `modified`: Timestamp when this ad hoc command was last modified. (datetime)\n* `name`: Name of this ad hoc command. (string)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `limit`: (string)\n* `credential`: (id)\n* `module_name`: (choice)\n - `command`\n - `shell`\n - `yum`\n - `apt`\n - `apt_key`\n - `apt_repository`\n - `apt_rpm`\n - `service`\n - `group`\n - `user`\n - `mount`\n - `ping`\n - `selinux`\n - `setup`\n - `win_ping`\n - `win_service`\n - `win_updates`\n - `win_group`\n - `win_user`\n* `module_args`: (string)\n* `forks`: (integer)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (string)\n* `become_enabled`: (boolean)\n* `diff_mode`: (boolean)\n\n\n\n## Sorting\n\nTo specify that ad hoc commands are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_ad_hoc_commands_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "credential": 1, + "inventory": 1, + "module_args": "uptime", + "module_name": "command" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "become_enabled": false, + "canceled_on": null, + "controller_node": "", + "created": "2018-02-01T08:00:00.000000Z", + "credential": 1, + "diff_mode": false, + "elapsed": 0.0, + "execution_environment": null, + "execution_node": "", + "extra_vars": "", + "failed": false, + "finished": null, + "forks": 0, + "id": 1, + "inventory": 1, + "job_explanation": "", + "job_type": "run", + "launch_type": "manual", + "launched_by": {}, + "limit": "", + "modified": "2018-02-01T08:00:00.000000Z", + "module_args": "uptime", + "module_name": "command", + "name": "command", + "related": { + "activity_stream": "/api/v2/ad_hoc_commands/1/activity_stream/", + "cancel": "/api/v2/ad_hoc_commands/1/cancel/", + "credential": "/api/v2/credentials/1/", + "events": "/api/v2/ad_hoc_commands/1/events/", + "inventory": "/api/v2/inventories/1/", + "notifications": "/api/v2/ad_hoc_commands/1/notifications/", + "relaunch": "/api/v2/ad_hoc_commands/1/relaunch/", + "stdout": "/api/v2/ad_hoc_commands/1/stdout/" + }, + "started": null, + "status": "new", + "summary_fields": { + "credential": { + "cloud": false, + "credential_type_id": 1, + "description": "", + "id": 1, + "kind": "ssh", + "kubernetes": false, + "name": "machine-cred" + }, + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "user_capabilities": { + "delete": false, + "start": true + } + }, + "type": "ad_hoc_command", + "url": "/api/v2/ad_hoc_commands/1/", + "verbosity": 0, + "work_unit_id": null + } + }, + "schema": { + "$ref": "#/definitions/AdHocCommandList" + } + }, + "400": { + "examples": { + "application/json": { + "verbosity": [ + "\"-1\" is not a valid choice." + ] + } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/ad_hoc_commands/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this ad hoc command. (integer)\n* `type`: Data type for this ad hoc command. (choice)\n* `url`: URL for this ad hoc command. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this ad hoc command was created. (datetime)\n* `modified`: Timestamp when this ad hoc command was last modified. (datetime)\n* `name`: Name of this ad hoc command. (string)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `result_traceback`: (string)\n* `event_processing_finished`: Indicates whether all of the events generated by this unified job have been saved to the database. (boolean)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `limit`: (string)\n* `credential`: (id)\n* `module_name`: (choice)\n - `command`\n - `shell`\n - `yum`\n - `apt`\n - `apt_key`\n - `apt_repository`\n - `apt_rpm`\n - `service`\n - `group`\n - `user`\n - `mount`\n - `ping`\n - `selinux`\n - `setup`\n - `win_ping`\n - `win_service`\n - `win_updates`\n - `win_group`\n - `win_user`\n* `module_args`: (string)\n* `forks`: (integer)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (string)\n* `become_enabled`: (boolean)\n* `diff_mode`: (boolean)\n* `host_status_counts`: Playbook stats from the Ansible playbook_on_stats event. (json)\n\n\n\n\n\n# Delete an Ad Hoc Command:\n\nMake a DELETE request to this resource to delete this ad hoc command.", + "operationId": "api_ad_hoc_commands_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "403": { + "examples": { + "application/json": { + "detail": "Cannot delete running job resource." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single ad hoc command", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this ad hoc command. (integer)\n* `type`: Data type for this ad hoc command. (choice)\n* `url`: URL for this ad hoc command. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this ad hoc command was created. 
(datetime)\n* `modified`: Timestamp when this ad hoc command was last modified. (datetime)\n* `name`: Name of this ad hoc command. (string)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `result_traceback`: (string)\n* `event_processing_finished`: Indicates whether all of the events generated by this unified job have been saved to the database. (boolean)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `limit`: (string)\n* `credential`: (id)\n* `module_name`: (choice)\n - `command`\n - `shell`\n - `yum`\n - `apt`\n - `apt_key`\n - `apt_repository`\n - `apt_rpm`\n - `service`\n - `group`\n - `user`\n - `mount`\n - `ping`\n - `selinux`\n - `setup`\n - `win_ping`\n - `win_service`\n - `win_updates`\n - `win_group`\n - `win_user`\n* `module_args`: (string)\n* `forks`: (integer)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (string)\n* `become_enabled`: (boolean)\n* `diff_mode`: (boolean)\n* `host_status_counts`: Playbook stats from the Ansible playbook_on_stats event. (json)\n\n\n\n\n\n# Delete an Ad Hoc Command:\n\nMake a DELETE request to this resource to delete this ad hoc command.", + "operationId": "api_ad_hoc_commands_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/AdHocCommandDetail" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single ad hoc command", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/ad_hoc_commands/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\nad hoc command.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. 
The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_ad_hoc_commands_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/ad_hoc_commands/{id}/cancel/": { + "get": { + "description": "record containing the following fields:\n\n* `can_cancel`: (boolean)", + "operationId": "api_ad_hoc_commands_cancel_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/AdHocCommandCancel" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single ad hoc command", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "record containing the following fields:\n\n* `can_cancel`: (boolean)", + "operationId": "api_ad_hoc_commands_cancel_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/AdHocCommandCancel" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/AdHocCommandCancel" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single ad hoc command", + "tags": [ + "api" + ] + } + }, + "/api/v2/ad_hoc_commands/{id}/events/": { + "get": { + "description": "ad hoc command events associated with the selected\nad hoc command.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of ad hoc command events\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more ad hoc command event records.
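\n\nFor example, a request like the following (the host and token here are placeholders) retrieves the first 50 events for ad hoc command 1:\n\n curl -H 'Authorization: Bearer <token>' 'https://controller.example.com/api/v2/ad_hoc_commands/1/events/?page_size=50'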
\n\n## Results\n\nEach ad hoc command event data structure includes the following fields:\n\n* `id`: Database ID for this ad hoc command event. (integer)\n* `type`: Data type for this ad hoc command event. (choice)\n* `url`: URL for this ad hoc command event. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this ad hoc command event was created. (datetime)\n* `modified`: Timestamp when this ad hoc command event was last modified. (datetime)\n* `ad_hoc_command`: (id)\n* `event`: (choice)\n - `runner_on_failed`: Host Failed\n - `runner_on_ok`: Host OK\n - `runner_on_unreachable`: Host Unreachable\n - `runner_on_skipped`: Host Skipped\n - `debug`: Debug\n - `verbose`: Verbose\n - `deprecated`: Deprecated\n - `warning`: Warning\n - `system_warning`: System Warning\n - `error`: Error\n* `counter`: (integer)\n* `event_display`: (string)\n* `event_data`: (json)\n* `failed`: (boolean)\n* `changed`: (boolean)\n* `uuid`: (string)\n* `host`: (id)\n* `host_name`: (string)\n* `stdout`: (string)\n* `start_line`: (integer)\n* `end_line`: (integer)\n* `verbosity`: (integer)\n\n\n\n## Sorting\n\nTo specify that ad hoc command events are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_ad_hoc_commands_events_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "ad_hoc_command": 1, + "changed": false, + "counter": 0, + "created": "2018-02-01T08:00:00.000000Z", + "end_line": 0, + "event": "runner_on_start", + "event_data": {}, + "event_display": "runner_on_start", + "failed": false, + "host": null, + "host_name": "", + "id": 1, + "modified": "2018-02-01T08:00:00.000000Z", + "related": { + "ad_hoc_command": "/api/v2/ad_hoc_commands/1/" + }, + "start_line": 0, + "stdout": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "summary_fields": {}, + "type": "ad_hoc_command_event", + "url": "/api/v2/ad_hoc_command_events/1/", + "uuid": "abc123", + "verbosity": 0 + } + ] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/AdHocCommandEvent" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/ad_hoc_commands/{id}/notifications/": { + "get": { + 
"description": "notifications associated with the selected\nad hoc command.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notifications\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification records. \n\n## Results\n\nEach notification data structure includes the following fields:\n\n* `id`: Database ID for this notification. (integer)\n* `type`: Data type for this notification. (choice)\n* `url`: URL for this notification. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification was created. (datetime)\n* `modified`: Timestamp when this notification was last modified. (datetime)\n* `notification_template`: (id)\n* `error`: (string)\n* `status`: (choice)\n - `pending`: Pending\n - `successful`: Successful\n - `failed`: Failed\n* `notifications_sent`: (integer)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `recipients`: (string)\n* `subject`: (string)\n* `body`: Notification body (json)\n\n\n\n## Sorting\n\nTo specify that notifications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_ad_hoc_commands_notifications_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Notification" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/ad_hoc_commands/{id}/relaunch/": { + "get": { + "description": "", + "operationId": "api_ad_hoc_commands_relaunch_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/AdHocCommandRelaunch" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a POST request to this resource to launch a job. If any passwords or variables are required then they should be passed in via POST data. 
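For example, a relaunch that needs no additional values can be triggered with an empty POST body (hypothetical host and token): curl -X POST -H 'Authorization: Bearer <token>' -H 'Content-Type: application/json' -d '{}' 'https://controller.example.com/api/v2/ad_hoc_commands/1/relaunch/'.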
To determine what values are required, you may make a GET request to this endpoint first.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_ad_hoc_commands_relaunch_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/AdHocCommandRelaunch" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/AdHocCommandRelaunch" + } + } + }, + "summary": "Make a POST request to this resource to launch a job. If any passwords or variables are required then they should be passed in via POST data. To determine what values are required, you may make a GET request to this endpoint first.", + "tags": [ + "api" + ] + } + }, + "/api/v2/ad_hoc_commands/{id}/stdout/": { + "get": { + "description": "ad hoc command.\n\n## Format\n\nUse the `format` query string parameter to specify the output format.\n\n* Browsable API: `?format=api`\n* HTML: `?format=html`\n* Plain Text: `?format=txt`\n* Plain Text with ANSI color codes: `?format=ansi`\n* JSON structure: `?format=json`\n* Downloaded Plain Text: `?format=txt_download`\n* Downloaded Plain Text with ANSI color codes: `?format=ansi_download`\n\n(_New in Ansible Tower 2.0.0_) When using the Browsable API, HTML and JSON\nformats, the `start_line` and `end_line` query string parameters can be used\nto specify a range of line numbers to retrieve.\n\nUse `dark=1` or `dark=0` as a query string parameter to force or disable a\ndark background.\n\nFiles over 1.0\u00a0MB (configurable)\nwill not display in the browser.
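\n\nFor example, a large job's plain-text output can be downloaded with a request like this (hypothetical host and token):\n\n curl -o stdout.txt -H 'Authorization: Bearer <token>' 'https://controller.example.com/api/v2/ad_hoc_commands/1/stdout/?format=txt_download'\n\n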
Use the `txt_download` or `ansi_download`\nformats to download the file directly to view it.", + "operationId": "api_ad_hoc_commands_stdout_read", + "parameters": [], + "produces": [ + "text/plain", + "text/plain", + "application/json", + "text/plain", + "text/plain" + ], + "responses": { + "200": { + "description": "", + "examples": { + "text/plain": "\u30aa0\n\u30aa1\n\u30aa2\n" + }, + "schema": { + "$ref": "#/definitions/UnifiedJobStdout" + } + } + }, + "summary": "Make GET request to this resource to retrieve the stdout from running this", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/analytics/": { + "get": { + "description": "", + "operationId": "api_analytics_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/analytics/adoption_rate/": { + "get": { + "description": "", + "operationId": "api_analytics_adoption_rate_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/adoption_rate/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_adoption_rate_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/adoption_rate/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/adoption_rate_options/": { + "get": { + "description": "", + "operationId": "api_analytics_adoption_rate_options_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/adoption_rate_options/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_adoption_rate_options_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/adoption_rate_options/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/authorized/": { + "get": { + "description": "", + "operationId": "api_analytics_authorized_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/authorized/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_authorized_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/authorized/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/event_explorer/": { + "get": { + "description": "", + "operationId": "api_analytics_event_explorer_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on 
/api/{version}/analytics/event_explorer/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_event_explorer_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/event_explorer/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/event_explorer_options/": { + "get": { + "description": "", + "operationId": "api_analytics_event_explorer_options_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/event_explorer_options/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_event_explorer_options_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/event_explorer_options/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/host_explorer/": { + "get": { + "description": "", + "operationId": "api_analytics_host_explorer_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/host_explorer/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_host_explorer_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/host_explorer/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/host_explorer_options/": { + "get": { + "description": "", + "operationId": "api_analytics_host_explorer_options_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/host_explorer_options/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_host_explorer_options_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/host_explorer_options/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/job_explorer/": { + "get": { + "description": "", + "operationId": "api_analytics_job_explorer_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/job_explorer/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_job_explorer_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/job_explorer/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/job_explorer_options/": { + "get": { + "description": "", + "operationId": "api_analytics_job_explorer_options_list", + "parameters": [], + "responses": { + "200": { + "description": "" + 
} + }, + "summary": "No Description for get on /api/{version}/analytics/job_explorer_options/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_job_explorer_options_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/job_explorer_options/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/probe_template_for_hosts/": { + "get": { + "description": "", + "operationId": "api_analytics_probe_template_for_hosts_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/probe_template_for_hosts/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_probe_template_for_hosts_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/probe_template_for_hosts/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/probe_template_for_hosts_options/": { + "get": { + "description": "", + "operationId": "api_analytics_probe_template_for_hosts_options_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/probe_template_for_hosts_options/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_probe_template_for_hosts_options_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/probe_template_for_hosts_options/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/probe_templates/": { + "get": { + "description": "", + "operationId": "api_analytics_probe_templates_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/probe_templates/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_probe_templates_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/probe_templates/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/probe_templates_options/": { + "get": { + "description": "", + "operationId": "api_analytics_probe_templates_options_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/probe_templates_options/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_probe_templates_options_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/probe_templates_options/", + "tags": [ + "api" + ] + } + }, + 
"/api/v2/analytics/report/{slug}/": { + "get": { + "description": "", + "operationId": "api_analytics_report_read", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/report/{slug}/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "slug", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_report_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/report/{slug}/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/report_options/": { + "get": { + "description": "", + "operationId": "api_analytics_report_options_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/report_options/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_report_options_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/report_options/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/reports/": { + "get": { + "description": "", + "operationId": "api_analytics_reports_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/reports/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_reports_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/reports/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/roi_templates/": { + "get": { + "description": "", + "operationId": "api_analytics_roi_templates_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/roi_templates/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_roi_templates_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/roi_templates/", + "tags": [ + "api" + ] + } + }, + "/api/v2/analytics/roi_templates_options/": { + "get": { + "description": "", + "operationId": "api_analytics_roi_templates_options_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "No Description for get on /api/{version}/analytics/roi_templates_options/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_analytics_roi_templates_options_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/analytics/roi_templates_options/", + 
"tags": [ + "api" + ] + } + }, + "/api/v2/applications/": { + "get": { + "description": "applications.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of applications\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more application records. \n\n## Results\n\nEach application data structure includes the following fields:\n\n* `id`: Database ID for this application. (integer)\n* `type`: Data type for this application. (choice)\n* `url`: URL for this application. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this application was created. (datetime)\n* `modified`: Timestamp when this application was last modified. (datetime)\n* `name`: Name of this application. (string)\n* `description`: Optional description of this application. (string)\n* `client_id`: (string)\n* `client_secret`: Used for more stringent verification of access to an application when creating a token. (string)\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string)\n* `authorization_grant_type`: The Grant type the user must use for acquire tokens for this application. (choice)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean)\n* `organization`: Organization containing this application. (id)\n\n\n\n## Sorting\n\nTo specify that applications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_applications_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/OAuth2Application" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "applications.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of applications\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more application records. \n\n## Results\n\nEach application data structure includes the following fields:\n\n* `id`: Database ID for this application. (integer)\n* `type`: Data type for this application. (choice)\n* `url`: URL for this application. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this application was created. (datetime)\n* `modified`: Timestamp when this application was last modified. (datetime)\n* `name`: Name of this application. (string)\n* `description`: Optional description of this application. (string)\n* `client_id`: (string)\n* `client_secret`: Used for more stringent verification of access to an application when creating a token. (string)\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string)\n* `authorization_grant_type`: The Grant type the user must use for acquire tokens for this application. (choice)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean)\n* `organization`: Organization containing this application. 
(id)\n\n\n\n## Sorting\n\nTo specify that applications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_applications_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/OAuth2Application" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/OAuth2Application" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/applications/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this application. (integer)\n* `type`: Data type for this application. (choice)\n* `url`: URL for this application. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this application was created. (datetime)\n* `modified`: Timestamp when this application was last modified. (datetime)\n* `name`: Name of this application. (string)\n* `description`: Optional description of this application. (string)\n* `client_id`: (string)\n* `client_secret`: Used for more stringent verification of access to an application when creating a token. (string)\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string)\n* `authorization_grant_type`: The Grant type the user must use for acquire tokens for this application. (choice)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean)\n* `organization`: Organization containing this application. (id)\n\n\n\n\n\n# Update an Application:\n\nMake a PUT or PATCH request to this resource to update this\napplication. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this application. (string, required)\n* `description`: Optional description of this application. (string, default=`\"\"`)\n\n\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. 
(choice, required)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string, default=`\"\"`)\n* `authorization_grant_type`: The Grant type the user must use for acquire tokens for this application. (choice, required)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean, default=`False`)\n* `organization`: Organization containing this application. (id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Application:\n\nMake a DELETE request to this resource to delete this application.", + "operationId": "api_applications_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single application", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this application. (integer)\n* `type`: Data type for this application. (choice)\n* `url`: URL for this application. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this application was created. (datetime)\n* `modified`: Timestamp when this application was last modified. (datetime)\n* `name`: Name of this application. (string)\n* `description`: Optional description of this application. (string)\n* `client_id`: (string)\n* `client_secret`: Used for more stringent verification of access to an application when creating a token. (string)\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string)\n* `authorization_grant_type`: The Grant type the user must use for acquire tokens for this application. (choice)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean)\n* `organization`: Organization containing this application. (id)\n\n\n\n\n\n# Update an Application:\n\nMake a PUT or PATCH request to this resource to update this\napplication. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this application. (string, required)\n* `description`: Optional description of this application. (string, default=`\"\"`)\n\n\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice, required)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string, default=`\"\"`)\n* `authorization_grant_type`: The Grant type the user must use for acquire tokens for this application. (choice, required)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean, default=`False`)\n* `organization`: Organization containing this application. 
(id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Application:\n\nMake a DELETE request to this resource to delete this application.", + "operationId": "api_applications_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/OAuth2Application" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single application", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this application. (integer)\n* `type`: Data type for this application. (choice)\n* `url`: URL for this application. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this application was created. (datetime)\n* `modified`: Timestamp when this application was last modified. (datetime)\n* `name`: Name of this application. (string)\n* `description`: Optional description of this application. (string)\n* `client_id`: (string)\n* `client_secret`: Used for more stringent verification of access to an application when creating a token. (string)\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string)\n* `authorization_grant_type`: The Grant type the user must use for acquire tokens for this application. (choice)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean)\n* `organization`: Organization containing this application. (id)\n\n\n\n\n\n# Update an Application:\n\nMake a PUT or PATCH request to this resource to update this\napplication. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this application. (string, required)\n* `description`: Optional description of this application. (string, default=`\"\"`)\n\n\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice, required)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string, default=`\"\"`)\n* `authorization_grant_type`: The Grant type the user must use for acquire tokens for this application. (choice, required)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean, default=`False`)\n* `organization`: Organization containing this application. 
(id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Application:\n\nMake a DELETE request to this resource to delete this application.", + "operationId": "api_applications_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/OAuth2Application" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/OAuth2Application" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single application", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this application. (integer)\n* `type`: Data type for this application. (choice)\n* `url`: URL for this application. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this application was created. (datetime)\n* `modified`: Timestamp when this application was last modified. (datetime)\n* `name`: Name of this application. (string)\n* `description`: Optional description of this application. (string)\n* `client_id`: (string)\n* `client_secret`: Used for more stringent verification of access to an application when creating a token. (string)\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string)\n* `authorization_grant_type`: The Grant type the user must use for acquire tokens for this application. (choice)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean)\n* `organization`: Organization containing this application. (id)\n\n\n\n\n\n# Update an Application:\n\nMake a PUT or PATCH request to this resource to update this\napplication. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this application. (string, required)\n* `description`: Optional description of this application. (string, default=`\"\"`)\n\n\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice, required)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string, default=`\"\"`)\n* `authorization_grant_type`: The Grant type the user must use for acquire tokens for this application. (choice, required)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean, default=`False`)\n* `organization`: Organization containing this application. 
(id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Application:\n\nMake a DELETE request to this resource to delete this application.", + "operationId": "api_applications_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/OAuth2Application" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/OAuth2Application" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single application", + "tags": [ + "api" + ] + } + }, + "/api/v2/applications/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\napplication.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_applications_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/applications/{id}/tokens/": { + "get": { + "description": "access tokens associated with the selected\napplication.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of access tokens\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more access token records. \n\n## Results\n\nEach access token data structure includes the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. (string)\n* `user`: The user representing the token owner. (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. 
(string)\n\n\n\n## Sorting\n\nTo specify that access tokens are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_applications_tokens_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/OAuth2Token" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "access tokens associated with the selected\napplication.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of access tokens\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more access token records. \n\n## Results\n\nEach access token data structure includes the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. 
(string)\n* `user`: The user representing the token owner. (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string)\n\n\n\n## Sorting\n\nTo specify that access tokens are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_applications_tokens_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/OAuth2Token" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/OAuth2Token" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/auth/": { + "get": { + "description": "", + "operationId": "api_auth_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "List enabled single-sign-on endpoints", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/bulk/": { + "get": { + "description": "", + "operationId": "api_bulk_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "List top level resources", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/bulk/host_create/": { + "get": { + "description": "", + "operationId": "api_bulk_host_create_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/BulkHostCreate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "This endpoint allows the client to create multiple hosts and associate them with an inventory. 
They may do this by providing the inventory ID and a list of the JSON objects that would normally be provided to create hosts.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_bulk_host_create_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/BulkHostCreate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/BulkHostCreate" + } + } + }, + "summary": "This endpoint allows the client to create multiple hosts and associate them with an inventory. They may do this by providing the inventory ID and a list of the JSON objects that would normally be provided to create hosts.", + "tags": [ + "api" + ] + } + }, + "/api/v2/bulk/job_launch/": { + "get": { + "description": "", + "operationId": "api_bulk_job_launch_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/BulkJobLaunch" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "This endpoint allows the client to launch multiple UnifiedJobTemplates at a time, alongside any parameters that they would normally set at launch time.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_bulk_job_launch_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/BulkJobLaunch" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/BulkJobLaunch" + } + } + }, + "summary": "This endpoint allows the client to launch multiple UnifiedJobTemplates at a time, alongside any parameters that they would normally set at launch time.", + "tags": [ + "api" + ] + } + }, + "/api/v2/config/": { + "delete": { + "description": "the following fields (some fields may not be visible to all users):\n\n* `project_base_dir`: Path on the server where projects and playbooks are \\\n stored.\n* `project_local_paths`: List of directories beneath `project_base_dir` to\n use when creating/editing a manual project.\n* `time_zone`: The configured time zone for the server.\n* `license_info`: Information about the current license.\n* `version`: Version of Ansible Tower package installed.\n* `custom_virtualenvs`: Deprecated venv locations from before migration to\n execution environments. 
Export tooling is in `awx-manage` commands.\n* `eula`: The current End-User License Agreement\n\n\n\n# Install or update an existing license\n\n(_New in Ansible Tower 2.0.0_) Make a POST request to this resource as a super\nuser to install or update the existing license. The license data itself can\nbe POSTed as a normal JSON data structure.\n\n(_New in Ansible Tower 2.1.1_) The POST must include a `eula_accepted` boolean\nelement indicating acceptance of the End-User License Agreement.\n\n\n\n# Delete an existing license\n\n(_New in Ansible Tower 2.0.0_) Make a DELETE request to this resource as a super\nuser to delete the existing license.", + "operationId": "api_config_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make a GET request to this resource to retrieve the configuration containing", + "tags": [ + "api" + ] + }, + "get": { + "description": "", + "operationId": "api_config_list", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "analytics_collectors": { + "config": { + "description": "General platform configuration.", + "name": "config", + "version": "1.6" + }, + "counts": { + "description": "Counts of objects such as organizations, inventories, and projects", + "name": "counts", + "version": "1.2" + }, + "cred_type_counts": { + "description": "Counts of credentials by credential type", + "name": "cred_type_counts", + "version": "1.0" + }, + "events_table": { + "description": "Automation task records", + "name": "events_table", + "version": "1.5" + }, + "host_metric_summary_monthly_table": { + "description": "HostMetricSummaryMonthly export, full sync", + "name": "host_metric_summary_monthly_table", + "version": "1.0" + }, + "host_metric_table": { + "description": "Host Metric data, incremental/full sync", + "name": "host_metric_table", + "version": "1.0" + }, + "instance_info": { + "description": "Cluster topology and capacity", + "name": "instance_info", + "version": "1.3" + }, + "inventory_counts": { + "description": "Inventories, their inventory sources, and host counts", + "name": "inventory_counts", + "version": "1.2" + }, + "org_counts": { + "description": "Counts of users and teams by organization", + "name": "org_counts", + "version": "1.0" + }, + "projects_by_scm_type": { + "description": "Counts of projects by source control type", + "name": "projects_by_scm_type", + "version": "1.0" + }, + "query_info": { + "description": "Metadata about the analytics collected", + "name": "query_info", + "version": "1.0" + }, + "unified_job_template_table": { + "description": "Data on job templates", + "name": "unified_job_template_table", + "version": "1.1" + }, + "unified_jobs_table": { + "description": "Data on jobs run", + "name": "unified_jobs_table", + "version": "1.4" + }, + "workflow_job_node_table": { + "description": "Data on workflow runs", + "name": "workflow_job_node_table", + "version": "1.0" + }, + "workflow_job_template_node_table": { + "description": "Data on workflows", + "name": "workflow_job_template_node_table", + "version": "1.0" + } + }, + "analytics_status": "off", + "become_methods": [ + [ + "sudo", + "Sudo" + ], + [ + "su", + "Su" + ], + [ + "pbrun", + "Pbrun" + ], + [ + "pfexec", + "Pfexec" + ], + [ + "dzdo", + "DZDO" + ], + [ + "pmrun", + "Pmrun" + ], + [ + "runas", + "Runas" + ], + [ + "enable", + "Enable" + ], + [ + "doas", + "Doas" + ], + [ + "ksu", + "Ksu" + ], + [ + "machinectl", + "Machinectl" + 
], + [ + "sesu", + "Sesu" + ] + ], + "custom_virtualenvs": [], + "eula": "", + "license_info": { + "license_type": "open", + "product_name": "AWX", + "subscription_name": "OPEN", + "valid_key": true + }, + "project_base_dir": "/var/lib/awx/projects/", + "project_local_paths": [], + "time_zone": "UTC", + "ui_next": true, + "version": "4.5.1.dev5+g0b88711771" + } + } + } + }, + "summary": "Return various sitewide configuration settings", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "the following fields (some fields may not be visible to all users):\n\n* `project_base_dir`: Path on the server where projects and playbooks are \\\n stored.\n* `project_local_paths`: List of directories beneath `project_base_dir` to\n use when creating/editing a manual project.\n* `time_zone`: The configured time zone for the server.\n* `license_info`: Information about the current license.\n* `version`: Version of Ansible Tower package installed.\n* `custom_virtualenvs`: Deprecated venv locations from before migration to\n execution environments. Export tooling is in `awx-manage` commands.\n* `eula`: The current End-User License Agreement\n\n\n\n# Install or update an existing license\n\n(_New in Ansible Tower 2.0.0_) Make a POST request to this resource as a super\nuser to install or update the existing license. The license data itself can\nbe POSTed as a normal json data structure.\n\n(_New in Ansible Tower 2.1.1_) The POST must include a `eula_accepted` boolean\nelement indicating acceptance of the End-User License Agreement.\n\n\n\n# Delete an existing license\n\n(_New in Ansible Tower 2.0.0_) Make a DELETE request to this resource as a super\nuser to delete the existing license", + "operationId": "api_config_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "Make a GET request to this resource to retrieve the configuration containing", + "tags": [ + "api" + ] + } + }, + "/api/v2/config/attach/": { + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_config_attach_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/config/attach/", + "tags": [ + "api" + ] + } + }, + "/api/v2/config/subscriptions/": { + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_config_subscriptions_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/config/subscriptions/", + "tags": [ + "api" + ] + } + }, + "/api/v2/constructed_inventories/": { + "get": { + "description": "inventories.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventories\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory records. \n\n## Results\n\nEach inventory data structure includes the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. 
(choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. (boolean)\n* `source_vars`: The source_vars for the related auto-created inventory source, special to constructed inventory. (string)\n* `update_cache_timeout`: The cache timeout for the related auto-created inventory source, special to constructed inventory. (integer)\n* `limit`: The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory. (string)\n* `verbosity`: The verbosity level for the related auto-created inventory source, special to constructed inventory. (integer)\n\n\n\n## Sorting\n\nTo specify that inventories are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_constructed_inventories_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ConstructedInventory" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "inventories.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventories\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory records. \n\n## Results\n\nEach inventory data structure includes the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. 
(integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. (boolean)\n* `source_vars`: The source_vars for the related auto-created inventory source, special to constructed inventory. (string)\n* `update_cache_timeout`: The cache timeout for the related auto-created inventory source, special to constructed inventory. (integer)\n* `limit`: The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory. (string)\n* `verbosity`: The verbosity level for the related auto-created inventory source, special to constructed inventory. (integer)\n\n\n\n## Sorting\n\nTo specify that inventories are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_constructed_inventories_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/ConstructedInventory" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/ConstructedInventory" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/constructed_inventories/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. (boolean)\n* `source_vars`: The source_vars for the related auto-created inventory source, special to constructed inventory. (string)\n* `update_cache_timeout`: The cache timeout for the related auto-created inventory source, special to constructed inventory. (integer)\n* `limit`: The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory. (string)\n* `verbosity`: The verbosity level for the related auto-created inventory source, special to constructed inventory. (integer)\n\n\n\n\n\n# Update an Inventory:\n\nMake a PUT or PATCH request to this resource to update this\ninventory. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this inventory. (string, required)\n* `description`: Optional description of this inventory. (string, default=`\"\"`)\n* `organization`: Organization containing this inventory. (id, required)\n\n* `variables`: Inventory variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\n\n\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. (boolean, default=`False`)\n* `source_vars`: The source_vars for the related auto-created inventory source, special to constructed inventory. 
(string, default=`\"\"`)\n* `update_cache_timeout`: The cache timeout for the related auto-created inventory source, special to constructed inventory. (integer, default=`None`)\n* `limit`: The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory. (string, default=`\"\"`)\n* `verbosity`: The verbosity level for the related auto-created inventory source, special to constructed inventory. (integer, default=`None`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Inventory:\n\nMake a DELETE request to this resource to delete this inventory.", + "operationId": "api_constructed_inventories_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make a GET request to this resource to retrieve a single inventory", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. 
(boolean)\n* `source_vars`: The source_vars for the related auto-created inventory source, special to constructed inventory. (string)\n* `update_cache_timeout`: The cache timeout for the related auto-created inventory source, special to constructed inventory. (integer)\n* `limit`: The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory. (string)\n* `verbosity`: The verbosity level for the related auto-created inventory source, special to constructed inventory. (integer)\n\n\n\n\n\n# Update an Inventory:\n\nMake a PUT or PATCH request to this resource to update this\ninventory. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this inventory. (string, required)\n* `description`: Optional description of this inventory. (string, default=`\"\"`)\n* `organization`: Organization containing this inventory. (id, required)\n\n* `variables`: Inventory variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\n\n\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. (boolean, default=`False`)\n* `source_vars`: The source_vars for the related auto-created inventory source, special to constructed inventory. (string, default=`\"\"`)\n* `update_cache_timeout`: The cache timeout for the related auto-created inventory source, special to constructed inventory. (integer, default=`None`)\n* `limit`: The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory. (string, default=`\"\"`)\n* `verbosity`: The verbosity level for the related auto-created inventory source, special to constructed inventory. (integer, default=`None`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Inventory:\n\nMake a DELETE request to this resource to delete this inventory.", + "operationId": "api_constructed_inventories_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/ConstructedInventory" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a single inventory", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. 
(choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. (boolean)\n* `source_vars`: The source_vars for the related auto-created inventory source, special to constructed inventory. (string)\n* `update_cache_timeout`: The cache timeout for the related auto-created inventory source, special to constructed inventory. (integer)\n* `limit`: The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory. (string)\n* `verbosity`: The verbosity level for the related auto-created inventory source, special to constructed inventory. (integer)\n\n\n\n\n\n# Update an Inventory:\n\nMake a PUT or PATCH request to this resource to update this\ninventory. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this inventory. (string, required)\n* `description`: Optional description of this inventory. (string, default=`\"\"`)\n* `organization`: Organization containing this inventory. (id, required)\n\n* `variables`: Inventory variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\n\n\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. (boolean, default=`False`)\n* `source_vars`: The source_vars for the related auto-created inventory source, special to constructed inventory. (string, default=`\"\"`)\n* `update_cache_timeout`: The cache timeout for the related auto-created inventory source, special to constructed inventory. (integer, default=`None`)\n* `limit`: The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory. 
(string, default=`\"\"`)\n* `verbosity`: The verbosity level for the related auto-created inventory source, special to constructed inventory. (integer, default=`None`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Inventory:\n\nMake a DELETE request to this resource to delete this inventory.", + "operationId": "api_constructed_inventories_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/ConstructedInventory" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/ConstructedInventory" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a single inventory", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. (boolean)\n* `source_vars`: The source_vars for the related auto-created inventory source, special to constructed inventory. 
(string)\n* `update_cache_timeout`: The cache timeout for the related auto-created inventory source, special to constructed inventory. (integer)\n* `limit`: The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory. (string)\n* `verbosity`: The verbosity level for the related auto-created inventory source, special to constructed inventory. (integer)\n\n\n\n\n\n# Update an Inventory:\n\nMake a PUT or PATCH request to this resource to update this\ninventory. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this inventory. (string, required)\n* `description`: Optional description of this inventory. (string, default=`\"\"`)\n* `organization`: Organization containing this inventory. (id, required)\n\n* `variables`: Inventory variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\n\n\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. (boolean, default=`False`)\n* `source_vars`: The source_vars for the related auto-created inventory source, special to constructed inventory. (string, default=`\"\"`)\n* `update_cache_timeout`: The cache timeout for the related auto-created inventory source, special to constructed inventory. (integer, default=`None`)\n* `limit`: The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory. (string, default=`\"\"`)\n* `verbosity`: The verbosity level for the related auto-created inventory source, special to constructed inventory. (integer, default=`None`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Inventory:\n\nMake a DELETE request to this resource to delete this inventory.", + "operationId": "api_constructed_inventories_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/ConstructedInventory" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/ConstructedInventory" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a single inventory", + "tags": [ + "api" + ] + } + }, + "/api/v2/credential_input_sources/": { + "get": { + "description": "credential input sources.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credential input sources\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential input source records. \n\n## Results\n\nEach credential input source data structure includes the following fields:\n\n* `id`: Database ID for this credential input source. (integer)\n* `type`: Data type for this credential input source. (choice)\n* `url`: URL for this credential input source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this credential input source was created. (datetime)\n* `modified`: Timestamp when this credential input source was last modified. (datetime)\n* `description`: Optional description of this credential input source. (string)\n* `input_field_name`: (string)\n* `metadata`: (json)\n* `target_credential`: (id)\n* `source_credential`: (id)\n\n\n\n## Sorting\n\nTo specify that credential input sources are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credential_input_sources_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 0, + "next": null, + "previous": null, + "results": [] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/CredentialInputSource" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credential input sources.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credential input sources\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential input source records. \n\n## Results\n\nEach credential input source data structure includes the following fields:\n\n* `id`: Database ID for this credential input source. (integer)\n* `type`: Data type for this credential input source. (choice)\n* `url`: URL for this credential input source. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential input source was created. (datetime)\n* `modified`: Timestamp when this credential input source was last modified. (datetime)\n* `description`: Optional description of this credential input source. (string)\n* `input_field_name`: (string)\n* `metadata`: (json)\n* `target_credential`: (id)\n* `source_credential`: (id)\n\n\n\n## Sorting\n\nTo specify that credential input sources are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credential_input_sources_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "input_field_name": "vault_password", + "metadata": { + "key": "some_key" + }, + "source_credential": 3, + "target_credential": 2 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "id": 1, + "input_field_name": "vault_password", + "metadata": { + "key": "some_key" + }, + "modified": "2018-02-01T08:00:00.000000Z", + "related": { + "source_credential": "/api/v2/credentials/3/", + "target_credential": "/api/v2/credentials/2/" + }, + "source_credential": 3, + "summary_fields": { + "source_credential": { + "cloud": false, + "credential_type_id": 3, + "description": "", + "id": 3, + "name": "external-cred" + }, + "target_credential": { + "cloud": false, + "credential_type_id": 2, + "description": "", + "id": 2, + "kind": "vault", + "name": "test-cred" + }, + "user_capabilities": { + "delete": true + } + }, + "target_credential": 2, + "type": "credential_input_source", + "url": "/api/v2/credential_input_sources/1/" + } + }, + "schema": { + "$ref": "#/definitions/CredentialInputSource" + } + }, + "400": { + "examples": { + "application/json": { + "metadata": { + "key": [ + "required for External Service" + ] + } + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/credential_input_sources/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this credential input source. (integer)\n* `type`: Data type for this credential input source. (choice)\n* `url`: URL for this credential input source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. 
The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential input source was created. (datetime)\n* `modified`: Timestamp when this credential input source was last modified. (datetime)\n* `description`: Optional description of this credential input source. (string)\n* `input_field_name`: (string)\n* `metadata`: (json)\n* `target_credential`: (id)\n* `source_credential`: (id)\n\n\n\n\n\n# Update a Credential Input Source:\n\nMake a PUT or PATCH request to this resource to update this\ncredential input source. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `description`: Optional description of this credential input source. (string, default=`\"\"`)\n* `input_field_name`: (string, required)\n* `metadata`: (json, default=`{}`)\n* `target_credential`: (id, required)\n* `source_credential`: (id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Credential Input Source:\n\nMake a DELETE request to this resource to delete this credential input source.", + "operationId": "api_credential_input_sources_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential input source", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this credential input source. (integer)\n* `type`: Data type for this credential input source. (choice)\n* `url`: URL for this credential input source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential input source was created. (datetime)\n* `modified`: Timestamp when this credential input source was last modified. (datetime)\n* `description`: Optional description of this credential input source. (string)\n* `input_field_name`: (string)\n* `metadata`: (json)\n* `target_credential`: (id)\n* `source_credential`: (id)\n\n\n\n\n\n# Update a Credential Input Source:\n\nMake a PUT or PATCH request to this resource to update this\ncredential input source. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `description`: Optional description of this credential input source. 
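As a quick illustration of the DELETE operation documented above (placeholder host, user, and record ID 1):

    # -w prints the HTTP status: expect 204 on success, 403 without permission.
    curl -s -o /dev/null -w '%{http_code}\n' -u admin:password \
      -X DELETE https://controller.example.com/api/v2/credential_input_sources/1/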
(string, default=`\"\"`)\n* `input_field_name`: (string, required)\n* `metadata`: (json, default=`{}`)\n* `target_credential`: (id, required)\n* `source_credential`: (id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Credential Input Source:\n\nMake a DELETE request to this resource to delete this credential input source.", + "operationId": "api_credential_input_sources_read", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "id": 1, + "input_field_name": "vault_password", + "metadata": { + "key": "some_key" + }, + "modified": "2018-02-01T08:00:00.000000Z", + "related": { + "source_credential": "/api/v2/credentials/2/", + "target_credential": "/api/v2/credentials/1/" + }, + "source_credential": 2, + "summary_fields": { + "source_credential": { + "cloud": false, + "credential_type_id": 2, + "description": "", + "id": 2, + "name": "external-cred" + }, + "target_credential": { + "cloud": false, + "credential_type_id": 1, + "description": "", + "id": 1, + "kind": "vault", + "name": "test-cred" + }, + "user_capabilities": { + "delete": false + } + }, + "target_credential": 1, + "type": "credential_input_source", + "url": "/api/v2/credential_input_sources/1/" + } + }, + "schema": { + "$ref": "#/definitions/CredentialInputSource" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential input source", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this credential input source. (integer)\n* `type`: Data type for this credential input source. (choice)\n* `url`: URL for this credential input source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential input source was created. (datetime)\n* `modified`: Timestamp when this credential input source was last modified. (datetime)\n* `description`: Optional description of this credential input source. (string)\n* `input_field_name`: (string)\n* `metadata`: (json)\n* `target_credential`: (id)\n* `source_credential`: (id)\n\n\n\n\n\n# Update a Credential Input Source:\n\nMake a PUT or PATCH request to this resource to update this\ncredential input source. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `description`: Optional description of this credential input source. 
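Retrieving the single record shown in the 200 example above is a one-liner. The jq filter that trims the output is an optional convenience (jq availability assumed), and the host, user, and ID are placeholders:

    curl -s -u admin:password \
      https://controller.example.com/api/v2/credential_input_sources/1/ \
      | jq '{input_field_name, source_credential, target_credential}'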
(string, default=`\"\"`)\n* `input_field_name`: (string, required)\n* `metadata`: (json, default=`{}`)\n* `target_credential`: (id, required)\n* `source_credential`: (id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Credential Input Source:\n\nMake a DELETE request to this resource to delete this credential input source.", + "operationId": "api_credential_input_sources_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "input_field_name": "password", + "target_credential": 1 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "id": 1, + "input_field_name": "password", + "metadata": { + "key": "some_key" + }, + "modified": "2018-02-01T08:00:00.000000Z", + "related": { + "source_credential": "/api/v2/credentials/3/", + "target_credential": "/api/v2/credentials/1/" + }, + "source_credential": 3, + "summary_fields": { + "source_credential": { + "cloud": false, + "credential_type_id": 3, + "description": "", + "id": 3, + "name": "external-cred" + }, + "target_credential": { + "cloud": false, + "credential_type_id": 1, + "description": "", + "id": 1, + "kind": "ssh", + "name": "machine-cred" + }, + "user_capabilities": { + "delete": true + } + }, + "target_credential": 1, + "type": "credential_input_source", + "url": "/api/v2/credential_input_sources/1/" + } + }, + "schema": { + "$ref": "#/definitions/CredentialInputSource" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential input source", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this credential input source. (integer)\n* `type`: Data type for this credential input source. (choice)\n* `url`: URL for this credential input source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential input source was created. (datetime)\n* `modified`: Timestamp when this credential input source was last modified. (datetime)\n* `description`: Optional description of this credential input source. (string)\n* `input_field_name`: (string)\n* `metadata`: (json)\n* `target_credential`: (id)\n* `source_credential`: (id)\n\n\n\n\n\n# Update a Credential Input Source:\n\nMake a PUT or PATCH request to this resource to update this\ncredential input source. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `description`: Optional description of this credential input source. 
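The PATCH example body above retargets the input source without touching its other fields. Sketched with curl (placeholders as before):

    # Only the listed fields change; everything else keeps its stored value.
    curl -s -u admin:password -X PATCH \
      -H "Content-Type: application/json" \
      -d '{"input_field_name": "password", "target_credential": 1}' \
      https://controller.example.com/api/v2/credential_input_sources/1/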
(string, default=`\"\"`)\n* `input_field_name`: (string, required)\n* `metadata`: (json, default=`{}`)\n* `target_credential`: (id, required)\n* `source_credential`: (id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Credential Input Source:\n\nMake a DELETE request to this resource to delete this credential input source.", + "operationId": "api_credential_input_sources_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/CredentialInputSource" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/CredentialInputSource" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential input source", + "tags": [ + "api" + ] + } + }, + "/api/v2/credential_types/": { + "get": { + "description": "credential types.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credential types\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential type records. \n\n## Results\n\nEach credential type data structure includes the following fields:\n\n* `id`: Database ID for this credential type. (integer)\n* `type`: Data type for this credential type. (choice)\n* `url`: URL for this credential type. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential type was created. (datetime)\n* `modified`: Timestamp when this credential type was last modified. (datetime)\n* `name`: Name of this credential type. (string)\n* `description`: Optional description of this credential type. (string)\n* `kind`: (choice)\n - `ssh`: Machine\n - `vault`: Vault\n - `net`: Network\n - `scm`: Source Control\n - `cloud`: Cloud\n - `registry`: Container Registry\n - `token`: Personal Access Token\n - `insights`: Insights\n - `external`: External\n - `kubernetes`: Kubernetes\n - `galaxy`: Galaxy/Automation Hub\n - `cryptography`: Cryptography\n* `namespace`: (string)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `injectors`: Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n\n\n\n## Sorting\n\nTo specify that credential types are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
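Because a PUT replaces the whole record, the request must carry every required field (`input_field_name`, `source_credential`, `target_credential`), unlike the PATCH above. A sketch that reuses the field values from the creation example; which optional fields you include beyond the required ones is up to you:

    curl -s -u admin:password -X PUT \
      -H "Content-Type: application/json" \
      -d '{"description": "",
           "input_field_name": "vault_password",
           "metadata": {"key": "some_key"},
           "source_credential": 3,
           "target_credential": 2}' \
      https://controller.example.com/api/v2/credential_input_sources/1/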
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credential_types_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 0, + "next": null, + "previous": null, + "results": [] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/CredentialType" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credential types.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credential types\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential type records. \n\n## Results\n\nEach credential type data structure includes the following fields:\n\n* `id`: Database ID for this credential type. (integer)\n* `type`: Data type for this credential type. (choice)\n* `url`: URL for this credential type. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential type was created. (datetime)\n* `modified`: Timestamp when this credential type was last modified. (datetime)\n* `name`: Name of this credential type. (string)\n* `description`: Optional description of this credential type. (string)\n* `kind`: (choice)\n - `ssh`: Machine\n - `vault`: Vault\n - `net`: Network\n - `scm`: Source Control\n - `cloud`: Cloud\n - `registry`: Container Registry\n - `token`: Personal Access Token\n - `insights`: Insights\n - `external`: External\n - `kubernetes`: Kubernetes\n - `galaxy`: Galaxy/Automation Hub\n - `cryptography`: Cryptography\n* `namespace`: (string)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. 
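The `next` and `previous` links described above make it straightforward to walk a paginated collection. A sketch that assumes jq is installed and that `next` comes back as an API-relative path, as in the spec's own URL examples (host and user are placeholders):

    HOST=https://controller.example.com
    URL="$HOST/api/v2/credential_types/?page_size=100"
    while [ -n "$URL" ]; do
      PAGE=$(curl -s -u admin:password "$URL")
      echo "$PAGE" | jq -r '.results[].name'
      NEXT=$(echo "$PAGE" | jq -r '.next')   # null on the last page
      if [ "$NEXT" = "null" ]; then URL=""; else URL="$HOST$NEXT"; fi
    done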
(json)\n* `injectors`: Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n\n\n\n## Sorting\n\nTo specify that credential types are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credential_types_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "injectors": {}, + "inputs": { + "fields": [ + { + "default": true, + "id": "api_token", + "label": "API Token", + "type": "boolean" + } + ], + "required": [ + "api_token" + ] + }, + "kind": "cloud", + "name": "MyCloud" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "id": 1, + "injectors": {}, + "inputs": { + "fields": [ + { + "default": true, + "id": "api_token", + "label": "API Token", + "type": "boolean" + } + ], + "required": [ + "api_token" + ] + }, + "kind": "cloud", + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "MyCloud", + "namespace": null, + "related": { + "activity_stream": "/api/v2/credential_types/1/activity_stream/", + "credentials": "/api/v2/credential_types/1/credentials/" + }, + "summary_fields": { + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "credential_type", + "url": "/api/v2/credential_types/1/" + } + }, + "schema": { + "$ref": "#/definitions/CredentialType" + } + }, + "400": { + "examples": { + "application/json": { + "injectors": [ + "AWX_MY_CLOUD_TOKEN uses an undefined field ('api_tolkien' is undefined)" + ] + } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/credential_types/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this credential type. (integer)\n* `type`: Data type for this credential type. (choice)\n* `url`: URL for this credential type. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential type was created. (datetime)\n* `modified`: Timestamp when this credential type was last modified. (datetime)\n* `name`: Name of this credential type. (string)\n* `description`: Optional description of this credential type. 
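The POST body above defines a custom cloud credential type with a single boolean input. The same request as a curl sketch (placeholder host and user); note the 400 example above, returned when an injector references an input `id` that was never defined:

    curl -s -u admin:password -X POST \
      -H "Content-Type: application/json" \
      -d '{"name": "MyCloud",
           "kind": "cloud",
           "injectors": {},
           "inputs": {"fields": [{"id": "api_token",
                                  "label": "API Token",
                                  "type": "boolean",
                                  "default": true}],
                      "required": ["api_token"]}}' \
      https://controller.example.com/api/v2/credential_types/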
(string)\n* `kind`: (choice)\n - `ssh`: Machine\n - `vault`: Vault\n - `net`: Network\n - `scm`: Source Control\n - `cloud`: Cloud\n - `registry`: Container Registry\n - `token`: Personal Access Token\n - `insights`: Insights\n - `external`: External\n - `kubernetes`: Kubernetes\n - `galaxy`: Galaxy/Automation Hub\n - `cryptography`: Cryptography\n* `namespace`: (string)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `injectors`: Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n\n\n\n\n\n# Update a Credential Type:\n\nMake a PUT or PATCH request to this resource to update this\ncredential type. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this credential type. (string, required)\n* `description`: Optional description of this credential type. (string, default=`\"\"`)\n* `kind`: (choice, required)\n - `net`: Network\n - `cloud`: Cloud\n\n\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json, default=`{}`)\n* `injectors`: Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax. (json, default=`{}`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Credential Type:\n\nMake a DELETE request to this resource to delete this credential type.", + "operationId": "api_credential_types_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "401": { + "examples": { + "application/json": { + "detail": "Authentication credentials were not provided. To establish a login session, visit /api/login/." + } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "Credential types that are in use cannot be deleted" + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential type", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this credential type. (integer)\n* `type`: Data type for this credential type. (choice)\n* `url`: URL for this credential type. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential type was created. (datetime)\n* `modified`: Timestamp when this credential type was last modified. (datetime)\n* `name`: Name of this credential type. (string)\n* `description`: Optional description of this credential type. (string)\n* `kind`: (choice)\n - `ssh`: Machine\n - `vault`: Vault\n - `net`: Network\n - `scm`: Source Control\n - `cloud`: Cloud\n - `registry`: Container Registry\n - `token`: Personal Access Token\n - `insights`: Insights\n - `external`: External\n - `kubernetes`: Kubernetes\n - `galaxy`: Galaxy/Automation Hub\n - `cryptography`: Cryptography\n* `namespace`: (string)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `injectors`: Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax. 
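Given the 403 above ("Credential types that are in use cannot be deleted"), one defensive pattern is to check the type's credentials sub-collection before deleting. A sketch assuming jq and the usual placeholder host, user, and ID:

    ID=1
    COUNT=$(curl -s -u admin:password \
      "https://controller.example.com/api/v2/credential_types/$ID/credentials/" \
      | jq -r '.count')
    # Delete only when no credential still uses this type.
    if [ "$COUNT" -eq 0 ]; then
      curl -s -u admin:password -X DELETE \
        "https://controller.example.com/api/v2/credential_types/$ID/"
    fi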
(json)\n\n\n\n\n\n# Update a Credential Type:\n\nMake a PUT or PATCH request to this resource to update this\ncredential type. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this credential type. (string, required)\n* `description`: Optional description of this credential type. (string, default=`\"\"`)\n* `kind`: (choice, required)\n - `net`: Network\n - `cloud`: Cloud\n\n\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json, default=`{}`)\n* `injectors`: Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax. (json, default=`{}`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Credential Type:\n\nMake a DELETE request to this resource to delete this credential type.", + "operationId": "api_credential_types_read", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "id": 1, + "injectors": {}, + "inputs": {}, + "kind": "cloud", + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "Some Other Name", + "namespace": null, + "related": { + "activity_stream": "/api/v2/credential_types/1/activity_stream/", + "credentials": "/api/v2/credential_types/1/credentials/" + }, + "summary_fields": { + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "credential_type", + "url": "/api/v2/credential_types/1/" + } + }, + "schema": { + "$ref": "#/definitions/CredentialType" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential type", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this credential type. (integer)\n* `type`: Data type for this credential type. (choice)\n* `url`: URL for this credential type. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential type was created. (datetime)\n* `modified`: Timestamp when this credential type was last modified. (datetime)\n* `name`: Name of this credential type. (string)\n* `description`: Optional description of this credential type. (string)\n* `kind`: (choice)\n - `ssh`: Machine\n - `vault`: Vault\n - `net`: Network\n - `scm`: Source Control\n - `cloud`: Cloud\n - `registry`: Container Registry\n - `token`: Personal Access Token\n - `insights`: Insights\n - `external`: External\n - `kubernetes`: Kubernetes\n - `galaxy`: Galaxy/Automation Hub\n - `cryptography`: Cryptography\n* `namespace`: (string)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `injectors`: Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n\n\n\n\n\n# Update a Credential Type:\n\nMake a PUT or PATCH request to this resource to update this\ncredential type. 
The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this credential type. (string, required)\n* `description`: Optional description of this credential type. (string, default=`\"\"`)\n* `kind`: (choice, required)\n - `net`: Network\n - `cloud`: Cloud\n\n\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json, default=`{}`)\n* `injectors`: Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax. (json, default=`{}`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Credential Type:\n\nMake a DELETE request to this resource to delete this credential type.", + "operationId": "api_credential_types_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "name": "Some Other Name" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "id": 1, + "injectors": {}, + "inputs": {}, + "kind": "cloud", + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "Some Other Name", + "namespace": null, + "related": { + "activity_stream": "/api/v2/credential_types/1/activity_stream/", + "credentials": "/api/v2/credential_types/1/credentials/" + }, + "summary_fields": { + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "credential_type", + "url": "/api/v2/credential_types/1/" + } + }, + "schema": { + "$ref": "#/definitions/CredentialType" + } + }, + "401": { + "examples": { + "application/json": { + "detail": "Authentication credentials were not provided. To establish a login session, visit /api/login/." + } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "Modifications to inputs are not allowed for credential types that are in use" + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential type", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this credential type. (integer)\n* `type`: Data type for this credential type. (choice)\n* `url`: URL for this credential type. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential type was created. (datetime)\n* `modified`: Timestamp when this credential type was last modified. (datetime)\n* `name`: Name of this credential type. (string)\n* `description`: Optional description of this credential type. (string)\n* `kind`: (choice)\n - `ssh`: Machine\n - `vault`: Vault\n - `net`: Network\n - `scm`: Source Control\n - `cloud`: Cloud\n - `registry`: Container Registry\n - `token`: Personal Access Token\n - `insights`: Insights\n - `external`: External\n - `kubernetes`: Kubernetes\n - `galaxy`: Galaxy/Automation Hub\n - `cryptography`: Cryptography\n* `namespace`: (string)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `injectors`: Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax. 
(json)\n\n\n\n\n\n# Update a Credential Type:\n\nMake a PUT or PATCH request to this resource to update this\ncredential type. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this credential type. (string, required)\n* `description`: Optional description of this credential type. (string, default=`\"\"`)\n* `kind`: (choice, required)\n - `net`: Network\n - `cloud`: Cloud\n\n\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json, default=`{}`)\n* `injectors`: Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax. (json, default=`{}`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Credential Type:\n\nMake a DELETE request to this resource to delete this credential type.", + "operationId": "api_credential_types_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/CredentialType" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/CredentialType" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential type", + "tags": [ + "api" + ] + } + }, + "/api/v2/credential_types/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\ncredential type.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. 
(field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credential_types_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/credential_types/{id}/credentials/": { + "get": { + "description": "credentials associated with the selected\ncredential type.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. 
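To see the activity stream endpoint above in use, a sketch that lists recent operations on credential type 1, newest first (placeholder host and user; jq assumed; `timestamp` is a documented field of the record, so ordering by it should work, though the spec does not list orderable fields explicitly):

    curl -s -u admin:password \
      "https://controller.example.com/api/v2/credential_types/1/activity_stream/?order_by=-timestamp" \
      | jq -r '.results[] | "\(.timestamp)  \(.operation)"'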
(string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credential_types_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Credential" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credentials associated with the selected\ncredential type.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credential_types_credentials_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Credential" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Credential" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/credential_types/{id}/test/": { + "get": { + "description": "record containing the following fields:", + "operationId": "api_credential_types_test_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Empty" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential type", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "record containing the following fields:", + "operationId": "api_credential_types_test_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Empty" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Empty" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential type", + "tags": [ + "api" + ] + } + }, + "/api/v2/credentials/": { + "get": { + "description": "credentials.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. 
The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: Inherit permissions from organization roles. If provided on creation, do not give either user or team. (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "cloud": true, + "created": "2018-02-01T08:00:00.000000Z", + "credential_type": 1, + "description": "", + "id": 1, + "inputs": { + "api_token": "$encrypted$" + }, + "kind": null, + "kubernetes": false, + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "Best credential ever", + "organization": 1, + "related": { + "access_list": "/api/v2/credentials/1/access_list/", + "activity_stream": "/api/v2/credentials/1/activity_stream/", + "copy": "/api/v2/credentials/1/copy/", + "credential_type": "/api/v2/credential_types/1/", + "input_sources": "/api/v2/credentials/1/input_sources/", + "object_roles": 
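The two search styles documented above differ in scope: `search` matches the credential's own designated text fields, while `related__search` also matches fields on related models. A sketch with placeholder host, user, and search terms:

    # Match text fields on the credentials themselves:
    curl -s -u admin:password \
      "https://controller.example.com/api/v2/credentials/?search=cloud"
    # Match across related models as well (added in Ansible Tower 3.1.0):
    curl -s -u admin:password \
      "https://controller.example.com/api/v2/credentials/?related__search=test-org"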
"/api/v2/credentials/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "owner_teams": "/api/v2/credentials/1/owner_teams/", + "owner_users": "/api/v2/credentials/1/owner_users/" + }, + "summary_fields": { + "credential_type": { + "description": "", + "id": 1, + "name": "MyCloud" + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the credential", + "id": 16, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the credential", + "id": 18, + "name": "Read" + }, + "use_role": { + "description": "Can use the credential in a job template", + "id": 17, + "name": "Use" + } + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "owners": [ + { + "description": "test-org-desc", + "id": 1, + "name": "test-org", + "type": "organization", + "url": "/api/v2/organizations/1/" + } + ], + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true, + "use": true + } + }, + "type": "credential", + "url": "/api/v2/credentials/1/" + } + ] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/CredentialSerializerCreate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + }, + "400": { + "examples": { + "application/json": { + "detail": "Credential has no field named 'password'" + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credentials.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: Inherit permissions from organization roles. If provided on creation, do not give either user or team. (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. 
(json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credentials_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "credential_type": 1, + "inputs": { + "server_url": "http://foo.com" + }, + "name": "Second Best Credential Ever", + "organization": 1 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "cloud": false, + "created": "2018-02-01T08:00:00.000000Z", + "credential_type": 1, + "description": "", + "id": 1, + "inputs": { + "server_url": "http://foo.com" + }, + "kind": null, + "kubernetes": false, + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "Second Best Credential Ever", + "organization": 1, + "related": { + "access_list": "/api/v2/credentials/1/access_list/", + "activity_stream": "/api/v2/credentials/1/activity_stream/", + "copy": "/api/v2/credentials/1/copy/", + "credential_type": "/api/v2/credential_types/1/", + "input_sources": "/api/v2/credentials/1/input_sources/", + "object_roles": "/api/v2/credentials/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "owner_teams": "/api/v2/credentials/1/owner_teams/", + "owner_users": "/api/v2/credentials/1/owner_users/" + }, + "summary_fields": { + "credential_type": { + "description": "", + "id": 1, + "name": "MyTestCredentialType" + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the credential", + "id": 16, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the credential", + "id": 18, + "name": "Read" + }, + "use_role": { + "description": "Can use the credential in a job template", + "id": 17, + "name": "Use" + } + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "owners": [ + { + "description": "test-org-desc", + "id": 1, + "name": "test-org", + "type": "organization", + "url": "/api/v2/organizations/1/" + } + ], + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true, + "use": true + } + }, + "type": "credential", + "url": "/api/v2/credentials/1/" + } + }, + "schema": { + "$ref": "#/definitions/CredentialSerializerCreate" + } + }, + "400": { + "examples": { + "application/json": { + "inputs": { + "server_url": [ + "Invalid URL: http:domain:8080" + ] + } + } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." 
+ } + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/credentials/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n\n\n# Update a Credential:\n\nMake a PUT or PATCH request to this resource to update this\ncredential. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this credential. (string, required)\n* `description`: Optional description of this credential. (string, default=`\"\"`)\n* `organization`: (id, default=`None`)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id, required)\n\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json, default=`{}`)\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Credential:\n\nMake a DELETE request to this resource to delete this credential.", + "operationId": "api_credentials_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single credential", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n\n\n# Update a Credential:\n\nMake a PUT or PATCH request to this resource to update this\ncredential. 
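The credential-creation example above supplies a name, a credential type, an organization, and type-specific `inputs`. As a curl sketch (placeholder host and user; IDs taken from the example payload); a malformed input such as the `server_url` in the 400 example is rejected with a per-field error:

    curl -s -u admin:password -X POST \
      -H "Content-Type: application/json" \
      -d '{"name": "Second Best Credential Ever",
           "credential_type": 1,
           "organization": 1,
           "inputs": {"server_url": "http://foo.com"}}' \
      https://controller.example.com/api/v2/credentials/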
The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this credential. (string, required)\n* `description`: Optional description of this credential. (string, default=`\"\"`)\n* `organization`: (id, default=`None`)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id, required)\n\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json, default=`{}`)\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Credential:\n\nMake a DELETE request to this resource to delete this credential.", + "operationId": "api_credentials_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Credential" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n\n\n# Update a Credential:\n\nMake a PUT or PATCH request to this resource to update this\ncredential. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this credential. (string, required)\n* `description`: Optional description of this credential. (string, default=`\"\"`)\n* `organization`: (id, default=`None`)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id, required)\n\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. 
(json, default=`{}`)\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Credential:\n\nMake a DELETE request to this resource to delete this credential.", + "operationId": "api_credentials_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "inputs": { + "password": "secret", + "username": "joe" + } + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "cloud": false, + "created": "2018-02-01T08:00:00.000000Z", + "credential_type": 1, + "description": "", + "id": 1, + "inputs": { + "password": "$encrypted$", + "username": "joe" + }, + "kind": "ssh", + "kubernetes": false, + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "Best credential ever", + "organization": 1, + "related": { + "access_list": "/api/v2/credentials/1/access_list/", + "activity_stream": "/api/v2/credentials/1/activity_stream/", + "copy": "/api/v2/credentials/1/copy/", + "credential_type": "/api/v2/credential_types/1/", + "input_sources": "/api/v2/credentials/1/input_sources/", + "object_roles": "/api/v2/credentials/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "owner_teams": "/api/v2/credentials/1/owner_teams/", + "owner_users": "/api/v2/credentials/1/owner_users/" + }, + "summary_fields": { + "credential_type": { + "description": "", + "id": 1, + "name": "Machine" + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the credential", + "id": 16, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the credential", + "id": 18, + "name": "Read" + }, + "use_role": { + "description": "Can use the credential in a job template", + "id": 17, + "name": "Use" + } + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "owners": [ + { + "description": "test-org-desc", + "id": 1, + "name": "test-org", + "type": "organization", + "url": "/api/v2/organizations/1/" + } + ], + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true, + "use": true + } + }, + "type": "credential", + "url": "/api/v2/credentials/1/" + } + }, + "schema": { + "$ref": "#/definitions/Credential" + } + }, + "400": { + "examples": { + "application/json": { + "credential_type": [ + "You cannot change the credential type of the credential, as it may break the functionality of the resources using it." + ] + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. 
Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n\n\n# Update a Credential:\n\nMake a PUT or PATCH request to this resource to update this\ncredential. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this credential. (string, required)\n* `description`: Optional description of this credential. (string, default=`\"\"`)\n* `organization`: (id, default=`None`)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id, required)\n\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json, default=`{}`)\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Credential:\n\nMake a DELETE request to this resource to delete this credential.", + "operationId": "api_credentials_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "credential_type": 1, + "inputs": { + "password": "", + "username": "joe" + }, + "name": "Best credential ever", + "organization": 1 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "cloud": false, + "created": "2018-02-01T08:00:00.000000Z", + "credential_type": 1, + "description": "", + "id": 1, + "inputs": { + "username": "joe" + }, + "kind": "ssh", + "kubernetes": false, + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "Best credential ever", + "organization": 1, + "related": { + "access_list": "/api/v2/credentials/1/access_list/", + "activity_stream": "/api/v2/credentials/1/activity_stream/", + "copy": "/api/v2/credentials/1/copy/", + "credential_type": "/api/v2/credential_types/1/", + "input_sources": "/api/v2/credentials/1/input_sources/", + "object_roles": "/api/v2/credentials/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "owner_teams": "/api/v2/credentials/1/owner_teams/", + "owner_users": "/api/v2/credentials/1/owner_users/" + }, + "summary_fields": { + "credential_type": { + "description": "", + "id": 1, + "name": "Machine" + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the credential", + "id": 16, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the credential", + "id": 18, + "name": "Read" + }, + "use_role": { + "description": "Can use the credential in a job template", + "id": 17, + "name": "Use" + } + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "owners": [ + { + "description": "test-org-desc", + "id": 1, + "name": "test-org", + "type": "organization", + "url": "/api/v2/organizations/1/" + } + ], + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true, + "use": true + } + }, + "type": "credential", + "url": "/api/v2/credentials/1/" + } + }, + "schema": { + "$ref": "#/definitions/Credential" + } + }, + "400": { + "examples": { + "application/json": { + "inputs": { + "ssh_key_data": [ + "Invalid certificate or key: invalid-key..." 
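As a sketch of the update semantics described above (same placeholder host and token): PATCH sends only the fields being changed, PUT resends the full modifiable set, and DELETE removes the credential with a 204 response. The request bodies are the documented examples.

    # PATCH: include only the fields being modified.
    curl -s -X PATCH -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"inputs": {"username": "joe", "password": "secret"}}' \
      "https://awx.example.com/api/v2/credentials/1/"

    # PUT: include all modifiable fields (name, description, organization,
    # credential_type, inputs).
    curl -s -X PUT -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name": "Best credential ever", "credential_type": 1, "organization": 1,
           "inputs": {"username": "joe", "password": ""}}' \
      "https://awx.example.com/api/v2/credentials/1/"

    # DELETE: expect HTTP 204 with no body.
    curl -s -X DELETE -H "Authorization: Bearer $TOKEN" \
      "https://awx.example.com/api/v2/credentials/1/"

Note the documented 400 response on update: changing `credential_type` is rejected because it may break resources that use the credential.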
+ ] + } + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential", + "tags": [ + "api" + ] + } + }, + "/api/v2/credentials/{id}/access_list/": { + "get": { + "description": "users.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credentials_access_list_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ResourceAccessListElement" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/credentials/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\ncredential.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. 
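The access list is an ordinary paginated collection, so the generic `search`, `page`, and `page_size` parameters described above apply, and `related__search` extends the match to related fields. A hypothetical sketch with the same placeholders:

    # Users with access to credential 1 whose designated text fields match "findme".
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://awx.example.com/api/v2/credentials/1/access_list/?search=findme"

    # Search across related fields as well.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://awx.example.com/api/v2/credentials/1/access_list/?related__search=findme"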
For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credentials_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/credentials/{id}/copy/": { + "get": { + "description": "", + "operationId": "api_credentials_copy_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Copy" + }, + "type": "array" + } + 
}, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "No Description for get on /api/{version}/credentials/{id}/copy/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_credentials_copy_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Copy" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Copy" + } + } + }, + "summary": "No Description for post on /api/{version}/credentials/{id}/copy/", + "tags": [ + "api" + ] + } + }, + "/api/v2/credentials/{id}/input_sources/": { + "get": { + "description": "credential input sources associated with the selected\ncredential.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credential input sources\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential input source records. \n\n## Results\n\nEach credential input source data structure includes the following fields:\n\n* `id`: Database ID for this credential input source. (integer)\n* `type`: Data type for this credential input source. (choice)\n* `url`: URL for this credential input source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential input source was created. (datetime)\n* `modified`: Timestamp when this credential input source was last modified. (datetime)\n* `description`: Optional description of this credential input source. (string)\n* `input_field_name`: (string)\n* `metadata`: (json)\n* `target_credential`: (id)\n* `source_credential`: (id)\n\n\n\n## Sorting\n\nTo specify that credential input sources are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
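The copy endpoint above only references the `Copy` serializer, whose fields are not expanded in this excerpt, so the POST body below is an assumption inferred from the controller's other copy endpoints (they conventionally accept a `name` for the duplicate); verify against `#/definitions/Copy` before relying on it. Placeholders as before.

    # GET returns a paginated list described by the same Copy serializer.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://awx.example.com/api/v2/credentials/1/copy/"

    # POST creates the duplicate (201). The "name" field here is an assumption,
    # not shown in this excerpt; check the Copy definition.
    curl -s -X POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name": "Copy of credential 1"}' \
      "https://awx.example.com/api/v2/credentials/1/copy/"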
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credentials_input_sources_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "id": 1, + "input_field_name": "vault_password", + "metadata": { + "key": "some_key" + }, + "modified": "2018-02-01T08:00:00.000000Z", + "related": { + "source_credential": "/api/v2/credentials/2/", + "target_credential": "/api/v2/credentials/1/" + }, + "source_credential": 2, + "summary_fields": { + "source_credential": { + "cloud": false, + "credential_type_id": 2, + "description": "", + "id": 2, + "name": "external-cred" + }, + "target_credential": { + "cloud": false, + "credential_type_id": 1, + "description": "", + "id": 1, + "kind": "vault", + "name": "test-cred" + }, + "user_capabilities": { + "delete": true + } + }, + "target_credential": 1, + "type": "credential_input_source", + "url": "/api/v2/credential_input_sources/1/" + } + ] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/CredentialInputSource" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credential input sources associated with the selected\ncredential.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credential input sources\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential input source records. \n\n## Results\n\nEach credential input source data structure includes the following fields:\n\n* `id`: Database ID for this credential input source. (integer)\n* `type`: Data type for this credential input source. (choice)\n* `url`: URL for this credential input source. 
(string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential input source was created. (datetime)\n* `modified`: Timestamp when this credential input source was last modified. (datetime)\n* `description`: Optional description of this credential input source. (string)\n* `input_field_name`: (string)\n* `metadata`: (json)\n* `target_credential`: (id)\n* `source_credential`: (id)\n\n\n\n## Sorting\n\nTo specify that credential input sources are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credentials_input_sources_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "input_field_name": "vault_password", + "metadata": { + "key": "some_key" + }, + "source_credential": 2 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "id": 1, + "input_field_name": "vault_password", + "metadata": { + "key": "some_key" + }, + "modified": "2018-02-01T08:00:00.000000Z", + "related": { + "source_credential": "/api/v2/credentials/2/", + "target_credential": "/api/v2/credentials/1/" + }, + "source_credential": 2, + "summary_fields": { + "source_credential": { + "cloud": false, + "credential_type_id": 2, + "description": "", + "id": 2, + "name": "external-cred" + }, + "target_credential": { + "cloud": false, + "credential_type_id": 1, + "description": "", + "id": 1, + "kind": "vault", + "name": "test-cred" + }, + "user_capabilities": { + "delete": true + } + }, + "target_credential": 1, + "type": "credential_input_source", + "url": "/api/v2/credential_input_sources/1/" + } + }, + "schema": { + "$ref": "#/definitions/CredentialInputSource" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/credentials/{id}/object_roles/": { + "get": { + "description": "roles associated with the selected\ncredential.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records. 
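The documented example body links the `vault_password` input field of the credential in the URL to an external source credential; note that the request omits `target_credential`, which the 201 response fills in with the credential addressed by the URL. A sketch with the usual placeholder host and token:

    # Drive credential 1's vault_password input from external credential 2.
    curl -s -X POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"input_field_name": "vault_password", "metadata": {"key": "some_key"},
           "source_credential": 2}' \
      "https://awx.example.com/api/v2/credentials/1/input_sources/"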
\n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credentials_object_roles_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Role" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/credentials/{id}/owner_teams/": { + "get": { + "description": "teams associated with the selected\ncredential.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of teams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more team records. \n\n## Results\n\nEach team data structure includes the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. (choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. (string)\n* `description`: Optional description of this team. (string)\n* `organization`: (id)\n\n\n\n## Sorting\n\nTo specify that teams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credentials_owner_teams_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Team" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/credentials/{id}/owner_users/": { + "get": { + "description": "users associated with the selected\ncredential.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. 
The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_credentials_owner_users_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/User" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/credentials/{id}/test/": { + "get": { + "description": "record containing the following fields:", + "operationId": "api_credentials_test_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Empty" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "record 
containing the following fields:", + "operationId": "api_credentials_test_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Empty" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Empty" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single credential", + "tags": [ + "api" + ] + } + }, + "/api/v2/dashboard/": { + "get": { + "description": "", + "operationId": "api_dashboard_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "Show Dashboard Details", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/dashboard/graphs/jobs/": { + "get": { + "description": "\n## Parameters and Filtering\n\nThe `period` of the data can be adjusted with:\n\n ?period=month\n\nWhere `month` can be replaced with `week`, `two_weeks`, or `day`. `month` is the default.\n\nThe type of job can be filtered with:\n\n ?job_type=all\n\nWhere `all` can be replaced with `inv_sync`, `playbook_run` or `scm_update`. `all` is the default.\n\n## Results\n\nData will be returned in the following format:\n\n \"jobs\": {\n \"successful\": [\n [\n 1402808400.0, \n 9\n ], ... ],\n \"failed\": [\n \t [\n 1402808400.0, \n 3\n ], ... ]\n }\n\nEach element contains an epoch timestamp represented in seconds and a numerical value indicating\nthe number of events during that time period", + "operationId": "api_dashboard_graphs_jobs_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "Make a GET request to this resource to retrieve aggregate statistics about job runs suitable for graphing.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/execution_environments/": { + "get": { + "description": "execution environments.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of execution environments\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more execution environment records. \n\n## Results\n\nEach execution environment data structure includes the following fields:\n\n* `id`: Database ID for this execution environment. (integer)\n* `type`: Data type for this execution environment. (choice)\n* `url`: URL for this execution environment. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this execution environment was created. (datetime)\n* `modified`: Timestamp when this execution environment was last modified. (datetime)\n* `name`: Name of this execution environment. (string)\n* `description`: Optional description of this execution environment. (string)\n* `organization`: The organization used to determine access to this execution environment. (id)\n* `image`: The full image location, including the container registry, image name, and version tag. 
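The jobs graph endpoint documented above takes `period` (`month`, `week`, `two_weeks`, `day`; default `month`) and `job_type` (`all`, `inv_sync`, `playbook_run`, `scm_update`; default `all`) and returns `[epoch_seconds, count]` pairs per series. A sketch with the usual placeholders; `jq` is used only to pull out one series for readability:

    # Successful playbook runs per time bucket over the last week.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://awx.example.com/api/v2/dashboard/graphs/jobs/?period=week&job_type=playbook_run" \
      | jq '.jobs.successful'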
(string)\n* `managed`: (boolean)\n* `credential`: (id)\n* `pull`: Pull image before running? (choice)\n - `\"\"`: ---------\n - `always`: Always pull container before running.\n - `missing`: Only pull the image if not present before running.\n - `never`: Never pull container before running.\n\n\n\n## Sorting\n\nTo specify that execution environments are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_execution_environments_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ExecutionEnvironment" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "execution environments.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of execution environments\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more execution environment records. \n\n## Results\n\nEach execution environment data structure includes the following fields:\n\n* `id`: Database ID for this execution environment. (integer)\n* `type`: Data type for this execution environment. (choice)\n* `url`: URL for this execution environment. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this execution environment was created. 
(datetime)\n* `modified`: Timestamp when this execution environment was last modified. (datetime)\n* `name`: Name of this execution environment. (string)\n* `description`: Optional description of this execution environment. (string)\n* `organization`: The organization used to determine access to this execution environment. (id)\n* `image`: The full image location, including the container registry, image name, and version tag. (string)\n* `managed`: (boolean)\n* `credential`: (id)\n* `pull`: Pull image before running? (choice)\n - `\"\"`: ---------\n - `always`: Always pull container before running.\n - `missing`: Only pull the image if not present before running.\n - `never`: Never pull container before running.\n\n\n\n## Sorting\n\nTo specify that execution environments are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_execution_environments_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/ExecutionEnvironment" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/ExecutionEnvironment" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/execution_environments/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this execution environment. (integer)\n* `type`: Data type for this execution environment. (choice)\n* `url`: URL for this execution environment. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this execution environment was created. (datetime)\n* `modified`: Timestamp when this execution environment was last modified. (datetime)\n* `name`: Name of this execution environment. (string)\n* `description`: Optional description of this execution environment. (string)\n* `organization`: The organization used to determine access to this execution environment. (id)\n* `image`: The full image location, including the container registry, image name, and version tag. (string)\n* `managed`: (boolean)\n* `credential`: (id)\n* `pull`: Pull image before running? 
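From the field list above, `name` and `image` are the required inputs when creating an execution environment, with `organization`, `credential`, and the `pull` policy optional. A sketch (host and token are placeholders; the image reference is an arbitrary example value, not a real registry path):

    # Create an execution environment; pull the image only if not already present.
    curl -s -X POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name": "My EE", "image": "quay.io/example/custom-ee:latest",
           "organization": 1, "pull": "missing"}' \
      "https://awx.example.com/api/v2/execution_environments/"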
(choice)\n - `\"\"`: ---------\n - `always`: Always pull container before running.\n - `missing`: Only pull the image if not present before running.\n - `never`: Never pull container before running.\n\n\n\n\n\n# Update an Execution Environment:\n\nMake a PUT or PATCH request to this resource to update this\nexecution environment. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this execution environment. (string, required)\n* `description`: Optional description of this execution environment. (string, default=`\"\"`)\n* `organization`: The organization used to determine access to this execution environment. (id, default=``)\n* `image`: The full image location, including the container registry, image name, and version tag. (string, required)\n\n* `credential`: (id, default=``)\n* `pull`: Pull image before running? (choice)\n - `\"\"`: --------- (default)\n - `always`: Always pull container before running.\n - `missing`: Only pull the image if not present before running.\n - `never`: Never pull container before running.\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Execution Environment:\n\nMake a DELETE request to this resource to delete this execution environment.", + "operationId": "api_execution_environments_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single execution environment", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this execution environment. (integer)\n* `type`: Data type for this execution environment. (choice)\n* `url`: URL for this execution environment. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this execution environment was created. (datetime)\n* `modified`: Timestamp when this execution environment was last modified. (datetime)\n* `name`: Name of this execution environment. (string)\n* `description`: Optional description of this execution environment. (string)\n* `organization`: The organization used to determine access to this execution environment. (id)\n* `image`: The full image location, including the container registry, image name, and version tag. (string)\n* `managed`: (boolean)\n* `credential`: (id)\n* `pull`: Pull image before running? (choice)\n - `\"\"`: ---------\n - `always`: Always pull container before running.\n - `missing`: Only pull the image if not present before running.\n - `never`: Never pull container before running.\n\n\n\n\n\n# Update an Execution Environment:\n\nMake a PUT or PATCH request to this resource to update this\nexecution environment. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this execution environment. (string, required)\n* `description`: Optional description of this execution environment. (string, default=`\"\"`)\n* `organization`: The organization used to determine access to this execution environment. (id, default=``)\n* `image`: The full image location, including the container registry, image name, and version tag. (string, required)\n\n* `credential`: (id, default=``)\n* `pull`: Pull image before running? 
(choice)\n - `\"\"`: --------- (default)\n - `always`: Always pull container before running.\n - `missing`: Only pull the image if not present before running.\n - `never`: Never pull container before running.\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Execution Environment:\n\nMake a DELETE request to this resource to delete this execution environment.", + "operationId": "api_execution_environments_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/ExecutionEnvironment" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single execution environment", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this execution environment. (integer)\n* `type`: Data type for this execution environment. (choice)\n* `url`: URL for this execution environment. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this execution environment was created. (datetime)\n* `modified`: Timestamp when this execution environment was last modified. (datetime)\n* `name`: Name of this execution environment. (string)\n* `description`: Optional description of this execution environment. (string)\n* `organization`: The organization used to determine access to this execution environment. (id)\n* `image`: The full image location, including the container registry, image name, and version tag. (string)\n* `managed`: (boolean)\n* `credential`: (id)\n* `pull`: Pull image before running? (choice)\n - `\"\"`: ---------\n - `always`: Always pull container before running.\n - `missing`: Only pull the image if not present before running.\n - `never`: Never pull container before running.\n\n\n\n\n\n# Update an Execution Environment:\n\nMake a PUT or PATCH request to this resource to update this\nexecution environment. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this execution environment. (string, required)\n* `description`: Optional description of this execution environment. (string, default=`\"\"`)\n* `organization`: The organization used to determine access to this execution environment. (id, default=``)\n* `image`: The full image location, including the container registry, image name, and version tag. (string, required)\n\n* `credential`: (id, default=``)\n* `pull`: Pull image before running? 
(choice)\n - `\"\"`: --------- (default)\n - `always`: Always pull container before running.\n - `missing`: Only pull the image if not present before running.\n - `never`: Never pull container before running.\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Execution Environment:\n\nMake a DELETE request to this resource to delete this execution environment.", + "operationId": "api_execution_environments_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/ExecutionEnvironment" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/ExecutionEnvironment" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single execution environment", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this execution environment. (integer)\n* `type`: Data type for this execution environment. (choice)\n* `url`: URL for this execution environment. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this execution environment was created. (datetime)\n* `modified`: Timestamp when this execution environment was last modified. (datetime)\n* `name`: Name of this execution environment. (string)\n* `description`: Optional description of this execution environment. (string)\n* `organization`: The organization used to determine access to this execution environment. (id)\n* `image`: The full image location, including the container registry, image name, and version tag. (string)\n* `managed`: (boolean)\n* `credential`: (id)\n* `pull`: Pull image before running? (choice)\n - `\"\"`: ---------\n - `always`: Always pull container before running.\n - `missing`: Only pull the image if not present before running.\n - `never`: Never pull container before running.\n\n\n\n\n\n# Update an Execution Environment:\n\nMake a PUT or PATCH request to this resource to update this\nexecution environment. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this execution environment. (string, required)\n* `description`: Optional description of this execution environment. (string, default=`\"\"`)\n* `organization`: The organization used to determine access to this execution environment. (id, default=``)\n* `image`: The full image location, including the container registry, image name, and version tag. (string, required)\n\n* `credential`: (id, default=``)\n* `pull`: Pull image before running? 
(choice)\n - `\"\"`: --------- (default)\n - `always`: Always pull container before running.\n - `missing`: Only pull the image if not present before running.\n - `never`: Never pull container before running.\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Execution Environment:\n\nMake a DELETE request to this resource to delete this execution environment.", + "operationId": "api_execution_environments_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/ExecutionEnvironment" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/ExecutionEnvironment" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single execution environment", + "tags": [ + "api" + ] + } + }, + "/api/v2/execution_environments/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\nexecution environment.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. 
(field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_execution_environments_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/execution_environments/{id}/copy/": { + "get": { + "description": "", + "operationId": "api_execution_environments_copy_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Copy" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "No Description for get on /api/{version}/execution_environments/{id}/copy/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": 
true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_execution_environments_copy_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Copy" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Copy" + } + } + }, + "summary": "No Description for post on /api/{version}/execution_environments/{id}/copy/", + "tags": [ + "api" + ] + } + }, + "/api/v2/execution_environments/{id}/unified_job_templates/": { + "get": { + "description": "unified job templates associated with the selected\nexecution environment.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of unified job templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more unified job template records. \n\n## Results\n\nEach unified job template data structure includes the following fields:\n\n* `id`: Database ID for this unified job template. (integer)\n* `type`: Data type for this unified job template. (choice)\n* `url`: URL for this unified job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this unified job template was created. (datetime)\n* `modified`: Timestamp when this unified job template was last modified. (datetime)\n* `name`: Name of this unified job template. (string)\n* `description`: Optional description of this unified job template. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `execution_environment`: The container image to be used for execution. (id)\n\n\n\n## Sorting\n\nTo specify that unified job templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_execution_environments_unified_job_templates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/UnifiedJobTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/groups/": { + "get": { + "description": "groups.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Group" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "groups.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
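A minimal create sketch for the POST operation documented here, with the same placeholder $CONTROLLER and $TOKEN and an assumed existing inventory with ID 1:

    # Create a group; per the field list, name and inventory are required.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name": "web-servers", "inventory": 1}' \
      "https://$CONTROLLER/api/v2/groups/"
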
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Group" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Group" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/groups/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n\n\n# Update a Group:\n\nMake a PUT or PATCH request to this resource to update this\ngroup. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this group. (string, required)\n* `description`: Optional description of this group. (string, default=`\"\"`)\n* `inventory`: (id, required)\n* `variables`: Group variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Group:\n\nMake a DELETE request to this resource to delete this group.", + "operationId": "api_groups_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single group", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n\n\n# Update a Group:\n\nMake a PUT or PATCH request to this resource to update this\ngroup. 
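The PUT/PATCH rule spelled out below can be sketched as follows, again with placeholder host, token, and IDs:

    # PATCH: send only the fields being modified.
    curl -s -X PATCH \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"description": "Updated description"}' \
      "https://$CONTROLLER/api/v2/groups/1/"

    # PUT: send all modifiable fields, even the unchanged ones.
    curl -s -X PUT \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name": "web-servers", "description": "Updated description", "inventory": 1, "variables": ""}' \
      "https://$CONTROLLER/api/v2/groups/1/"
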
The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this group. (string, required)\n* `description`: Optional description of this group. (string, default=`\"\"`)\n* `inventory`: (id, required)\n* `variables`: Group variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Group:\n\nMake a DELETE request to this resource to delete this group.", + "operationId": "api_groups_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Group" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single group", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n\n\n# Update a Group:\n\nMake a PUT or PATCH request to this resource to update this\ngroup. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this group. (string, required)\n* `description`: Optional description of this group. (string, default=`\"\"`)\n* `inventory`: (id, required)\n* `variables`: Group variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Group:\n\nMake a DELETE request to this resource to delete this group.", + "operationId": "api_groups_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Group" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Group" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single group", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. 
(json)\n\n\n\n\n\n# Update a Group:\n\nMake a PUT or PATCH request to this resource to update this\ngroup. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this group. (string, required)\n* `description`: Optional description of this group. (string, default=`\"\"`)\n* `inventory`: (id, required)\n* `variables`: Group variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Group:\n\nMake a DELETE request to this resource to delete this group.", + "operationId": "api_groups_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "description": "Hello world", + "name": "New name" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "Hello world", + "id": 1, + "inventory": 1, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "New name", + "related": { + "activity_stream": "/api/v2/groups/1/activity_stream/", + "ad_hoc_commands": "/api/v2/groups/1/ad_hoc_commands/", + "all_hosts": "/api/v2/groups/1/all_hosts/", + "children": "/api/v2/groups/1/children/", + "hosts": "/api/v2/groups/1/hosts/", + "inventory": "/api/v2/inventories/1/", + "inventory_sources": "/api/v2/groups/1/inventory_sources/", + "job_events": "/api/v2/groups/1/job_events/", + "job_host_summaries": "/api/v2/groups/1/job_host_summaries/", + "potential_children": "/api/v2/groups/1/potential_children/", + "variable_data": "/api/v2/groups/1/variable_data/" + }, + "summary_fields": { + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true + } + }, + "type": "group", + "url": "/api/v2/groups/1/", + "variables": "" + } + }, + "schema": { + "$ref": "#/definitions/Group" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single group", + "tags": [ + "api" + ] + } + }, + "/api/v2/groups/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\ngroup.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/groups/{id}/ad_hoc_commands/": { + "get": { + "description": "ad hoc commands associated with the selected\ngroup.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": 
null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of ad hoc commands\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more ad hoc command records. \n\n## Results\n\nEach ad hoc command data structure includes the following fields:\n\n* `id`: Database ID for this ad hoc command. (integer)\n* `type`: Data type for this ad hoc command. (choice)\n* `url`: URL for this ad hoc command. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this ad hoc command was created. (datetime)\n* `modified`: Timestamp when this ad hoc command was last modified. (datetime)\n* `name`: Name of this ad hoc command. (string)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `limit`: (string)\n* `credential`: (id)\n* `module_name`: (choice)\n - `command`\n - `shell`\n - `yum`\n - `apt`\n - `apt_key`\n - `apt_repository`\n - `apt_rpm`\n - `service`\n - `group`\n - `user`\n - `mount`\n - `ping`\n - `selinux`\n - `setup`\n - `win_ping`\n - `win_service`\n - `win_updates`\n - `win_group`\n - `win_user`\n* `module_args`: (string)\n* `forks`: (integer)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (string)\n* `become_enabled`: (boolean)\n* `diff_mode`: (boolean)\n\n\n\n## Sorting\n\nTo specify that ad hoc commands are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_ad_hoc_commands_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/AdHocCommandList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "ad hoc commands associated with the selected\ngroup.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of ad hoc commands\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more ad hoc command records. \n\n## Results\n\nEach ad hoc command data structure includes the following fields:\n\n* `id`: Database ID for this ad hoc command. (integer)\n* `type`: Data type for this ad hoc command. (choice)\n* `url`: URL for this ad hoc command. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this ad hoc command was created. (datetime)\n* `modified`: Timestamp when this ad hoc command was last modified. (datetime)\n* `name`: Name of this ad hoc command. (string)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. 
(datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `limit`: (string)\n* `credential`: (id)\n* `module_name`: (choice)\n - `command`\n - `shell`\n - `yum`\n - `apt`\n - `apt_key`\n - `apt_repository`\n - `apt_rpm`\n - `service`\n - `group`\n - `user`\n - `mount`\n - `ping`\n - `selinux`\n - `setup`\n - `win_ping`\n - `win_service`\n - `win_updates`\n - `win_group`\n - `win_user`\n* `module_args`: (string)\n* `forks`: (integer)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (string)\n* `become_enabled`: (boolean)\n* `diff_mode`: (boolean)\n\n\n\n## Sorting\n\nTo specify that ad hoc commands are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_ad_hoc_commands_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/AdHocCommandList" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/AdHocCommandList" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/groups/{id}/all_hosts/": { + "get": { + "description": "hosts directly or indirectly belonging to this\ngroup.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of hosts\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more host records. \n\n## Results\n\nEach host data structure includes the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. (string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. (datetime)\n\n\n\n## Sorting\n\nTo specify that hosts are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_all_hosts_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Host" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of all", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/groups/{id}/children/": { + "get": { + "description": "groups associated with the selected\ngroup.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. 
The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_children_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Group" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "groups associated with the selected\ngroup.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. 
\n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_children_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "description": "Hello world", + "name": "New name" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "Hello world", + "id": 2, + "inventory": 1, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "New name", + "related": { + "activity_stream": "/api/v2/groups/2/activity_stream/", + "ad_hoc_commands": "/api/v2/groups/2/ad_hoc_commands/", + "all_hosts": "/api/v2/groups/2/all_hosts/", + "children": "/api/v2/groups/2/children/", + "hosts": "/api/v2/groups/2/hosts/", + "inventory": "/api/v2/inventories/1/", + "inventory_sources": "/api/v2/groups/2/inventory_sources/", + "job_events": "/api/v2/groups/2/job_events/", + "job_host_summaries": "/api/v2/groups/2/job_host_summaries/", + "potential_children": "/api/v2/groups/2/potential_children/", + "variable_data": "/api/v2/groups/2/variable_data/" + }, + "summary_fields": { + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true + } + }, + "type": "group", + "url": "/api/v2/groups/2/", + "variables": "" + } + }, + "schema": { + "$ref": "#/definitions/Group" + } + }, + "400": { + "examples": { + "application/json": { + "error": "Cyclical Group association." 
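Mirroring the request and response examples above, with the same placeholder host and token, a child group is created in place under its parent; pointing a group back at one of its own ancestors is what triggers the `Cyclical Group association.` error shown here:

    # Create "New name" directly as a child of group 1.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name": "New name", "description": "Hello world"}' \
      "https://$CONTROLLER/api/v2/groups/1/children/"
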
+ } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/groups/{id}/hosts/": { + "get": { + "description": "hosts associated with the selected\ngroup.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of hosts\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more host records. \n\n## Results\n\nEach host data structure includes the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. (string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. (datetime)\n\n\n\n## Sorting\n\nTo specify that hosts are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_hosts_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Host" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "hosts associated with the selected\ngroup.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of hosts\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more host records. \n\n## Results\n\nEach host data structure includes the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. (string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. 
(datetime)\n\n\n\n## Sorting\n\nTo specify that hosts are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_hosts_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "description": "Hello world", + "name": "New name" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "ansible_facts_modified": null, + "created": "2018-02-01T08:00:00.000000Z", + "description": "Hello world", + "enabled": true, + "has_active_failures": false, + "has_inventory_sources": false, + "id": 1, + "instance_id": "", + "inventory": 1, + "last_job": null, + "last_job_host_summary": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "New name", + "related": { + "activity_stream": "/api/v2/hosts/1/activity_stream/", + "ad_hoc_command_events": "/api/v2/hosts/1/ad_hoc_command_events/", + "ad_hoc_commands": "/api/v2/hosts/1/ad_hoc_commands/", + "all_groups": "/api/v2/hosts/1/all_groups/", + "ansible_facts": "/api/v2/hosts/1/ansible_facts/", + "groups": "/api/v2/hosts/1/groups/", + "inventory": "/api/v2/inventories/1/", + "inventory_sources": "/api/v2/hosts/1/inventory_sources/", + "job_events": "/api/v2/hosts/1/job_events/", + "job_host_summaries": "/api/v2/hosts/1/job_host_summaries/", + "smart_inventories": "/api/v2/hosts/1/smart_inventories/", + "variable_data": "/api/v2/hosts/1/variable_data/" + }, + "summary_fields": { + "groups": { + "count": 0, + "results": [] + }, + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "recent_jobs": [], + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "host", + "url": "/api/v2/hosts/1/", + "variables": "" + } + }, + "schema": { + "$ref": "#/definitions/Host" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." 
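And a sketch of the corresponding POST, mirroring the request body and 201 response shown above (same placeholder host, token, and group ID):

    # Create a host inside group 1; the response echoes the new Host record.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" \
      -H "Content-Type: application/json" \
      -d '{"name": "New name", "description": "Hello world"}' \
      "https://controller.example.com/api/v2/groups/1/hosts/"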
+ } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/groups/{id}/inventory_sources/": { + "get": { + "description": "inventory sources associated with the selected\ngroup.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventory sources\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory source records. \n\n## Results\n\nEach inventory source data structure includes the following fields:\n\n* `id`: Database ID for this inventory source. (integer)\n* `type`: Data type for this inventory source. (choice)\n* `url`: URL for this inventory source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory source was created. (datetime)\n* `modified`: Timestamp when this inventory source was last modified. (datetime)\n* `name`: Name of this inventory source. (string)\n* `description`: Optional description of this inventory source. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as \"foo.bar\", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get(\"foo\", {}).get(\"bar\", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example, if enabled_var=\"status.power_state\" and enabled_value=\"powered_on\" with host variables { \"status\": { \"power_state\": \"powered_on\", \"created\": \"2018-02-01T08:00:00.000000Z\", \"healthy\": true }, \"name\": \"foobar\", \"ip_address\": \"192.168.2.1\" }, the host would be marked enabled. If power_state were any value other than powered_on, the host would be disabled when imported. If the key is not found, the host will be enabled (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. 
(boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `none`: No External Source\n* `execution_environment`: The container image to be used for execution. (id)\n* `inventory`: (id)\n* `update_on_launch`: (boolean)\n* `update_cache_timeout`: (integer)\n* `source_project`: Project containing inventory file used as source. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that inventory sources are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_inventory_sources_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InventorySource" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/groups/{id}/job_events/": { + "get": { + "description": "job events associated with the selected\ngroup.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of job 
events\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job event records. \n\n## Results\n\nEach job event data structure includes the following fields:\n\n* `id`: Database ID for this job event. (integer)\n* `type`: Data type for this job event. (choice)\n* `url`: URL for this job event. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job event was created. (datetime)\n* `modified`: Timestamp when this job event was last modified. (datetime)\n* `job`: (id)\n* `event`: (choice)\n - `runner_on_failed`: Host Failed\n - `runner_on_start`: Host Started\n - `runner_on_ok`: Host OK\n - `runner_on_error`: Host Failure\n - `runner_on_skipped`: Host Skipped\n - `runner_on_unreachable`: Host Unreachable\n - `runner_on_no_hosts`: No Hosts Remaining\n - `runner_on_async_poll`: Host Polling\n - `runner_on_async_ok`: Host Async OK\n - `runner_on_async_failed`: Host Async Failure\n - `runner_item_on_ok`: Item OK\n - `runner_item_on_failed`: Item Failed\n - `runner_item_on_skipped`: Item Skipped\n - `runner_retry`: Host Retry\n - `runner_on_file_diff`: File Difference\n - `playbook_on_start`: Playbook Started\n - `playbook_on_notify`: Running Handlers\n - `playbook_on_include`: Including File\n - `playbook_on_no_hosts_matched`: No Hosts Matched\n - `playbook_on_no_hosts_remaining`: No Hosts Remaining\n - `playbook_on_task_start`: Task Started\n - `playbook_on_vars_prompt`: Variables Prompted\n - `playbook_on_setup`: Gathering Facts\n - `playbook_on_import_for_host`: internal: on Import for Host\n - `playbook_on_not_import_for_host`: internal: on Not Import for Host\n - `playbook_on_play_start`: Play Started\n - `playbook_on_stats`: Playbook Complete\n - `debug`: Debug\n - `verbose`: Verbose\n - `deprecated`: Deprecated\n - `warning`: Warning\n - `system_warning`: System Warning\n - `error`: Error\n* `counter`: (integer)\n* `event_display`: (string)\n* `event_data`: (json)\n* `event_level`: (integer)\n* `failed`: (boolean)\n* `changed`: (boolean)\n* `uuid`: (string)\n* `parent_uuid`: (string)\n* `host`: (id)\n* `host_name`: (string)\n* `playbook`: (string)\n* `play`: (string)\n* `task`: (string)\n* `role`: (string)\n* `stdout`: (string)\n* `start_line`: (integer)\n* `end_line`: (integer)\n* `verbosity`: (integer)\n\n\n\n## Sorting\n\nTo specify that job events are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_job_events_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/JobEvent" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/groups/{id}/job_host_summaries/": { + "get": { + "description": "job host summaries associated with the selected\ngroup.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of job host summaries\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job host summary records. \n\n## Results\n\nEach job host summary data structure includes the following fields:\n\n* `id`: Database ID for this job host summary. (integer)\n* `type`: Data type for this job host summary. (choice)\n* `url`: URL for this job host summary. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job host summary was created. (datetime)\n* `modified`: Timestamp when this job host summary was last modified. (datetime)\n* `job`: (id)\n* `host`: (id)\n* `constructed_host`: Only for jobs run against constructed inventories, this links to the host inside the constructed inventory. 
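For the `/api/v2/groups/{id}/job_events/` list operation above, one possible query combining the documented ordering and pagination parameters (placeholder host, token, and group ID; `jq` is used only to trim the output):

    # Newest job events for group 1, second page of 100.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://controller.example.com/api/v2/groups/1/job_events/?order_by=-created&page_size=100&page=2" \
      | jq '{count: .count, next: .next, events: (.results | length)}'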
(id)\n* `host_name`: (string)\n* `changed`: (integer)\n* `dark`: (integer)\n* `failures`: (integer)\n* `ok`: (integer)\n* `processed`: (integer)\n* `skipped`: (integer)\n* `failed`: (boolean)\n* `ignored`: (integer)\n* `rescued`: (integer)\n\n\n\n## Sorting\n\nTo specify that job host summaries are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_job_host_summaries_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/JobHostSummary" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/groups/{id}/potential_children/": { + "get": { + "description": "groups available to be added as children of the\ncurrent group.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. 
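For the `/api/v2/groups/{id}/job_host_summaries/` list operation above, a sketch that pulls the per-host task counters (same placeholder values; `jq` for display only):

    # ok/changed/failures counters per host for jobs touching group 1.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://controller.example.com/api/v2/groups/1/job_host_summaries/" \
      | jq '.results[] | {host: .host_name, ok: .ok, changed: .changed, failures: .failures}'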
(datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_groups_potential_children_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Group" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/groups/{id}/variable_data/": { + "get": { + "consumes": [ + "application/json", + "application/yaml" + ], + "description": "group.\n\n\n\n# Update Group Variable Data:\n\nMake a PUT or PATCH request to this resource to update variables defined for a\ngroup.", + "operationId": "api_groups_variable_data_read", + "parameters": [], + "produces": [ + "application/json", + "application/yaml" + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/GroupVariableData" + } + } + }, + "summary": "Make a GET request to this resource to retrieve all variables defined for a", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "consumes": [ + "application/json", + "application/yaml" + ], + "description": "group.\n\n\n\n# Update Group Variable Data:\n\nMake a PUT or PATCH request to this resource to update variables defined for a\ngroup.", + 
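For the `/api/v2/groups/{id}/potential_children/` list operation above, a minimal sketch (placeholder host, token, and group ID):

    # Groups still eligible to become children of group 1.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://controller.example.com/api/v2/groups/1/potential_children/?order_by=name"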
"operationId": "api_groups_variable_data_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/GroupVariableData" + } + } + ], + "produces": [ + "application/json", + "application/yaml" + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/GroupVariableData" + } + } + }, + "summary": "Make a GET request to this resource to retrieve all variables defined for a", + "tags": [ + "api" + ] + }, + "put": { + "consumes": [ + "application/json", + "application/yaml" + ], + "description": "group.\n\n\n\n# Update Group Variable Data:\n\nMake a PUT or PATCH request to this resource to update variables defined for a\ngroup.", + "operationId": "api_groups_variable_data_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/GroupVariableData" + } + } + ], + "produces": [ + "application/json", + "application/yaml" + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/GroupVariableData" + } + } + }, + "summary": "Make a GET request to this resource to retrieve all variables defined for a", + "tags": [ + "api" + ] + } + }, + "/api/v2/host_metric_summary_monthly/": { + "get": { + "description": "host metric summary monthlys.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of host metric summary monthlys\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more host metric summary monthly records. \n\n## Results\n\nEach host metric summary monthly data structure includes the following fields:\n\n* `id`: Database ID for this host metric summary monthly. (integer)\n* `date`: (date)\n* `license_consumed`: How many unique hosts are consumed from the license (integer)\n* `license_capacity`: 'License capacity as max. number of unique hosts (integer)\n* `hosts_added`: How many hosts were added in the associated month, consuming more license capacity (integer)\n* `hosts_deleted`: How many hosts were deleted in the associated month, freeing the license capacity (integer)\n* `indirectly_managed_hosts`: Manually entered number indirectly managed hosts for a certain month (integer)\n\n\n\n## Sorting\n\nTo specify that host metric summary monthlys are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_host_metric_summary_monthly_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/HostMetricSummaryMonthly" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/host_metrics/": { + "get": { + "description": "host metrics.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of host metrics\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more host metric records. \n\n## Results\n\nEach host metric data structure includes the following fields:\n\n* `id`: Database ID for this host metric. (integer)\n* `hostname`: (string)\n* `url`: URL for this host metric. (string)\n* `first_automation`: When the host was first automated against (datetime)\n* `last_automation`: When the host was last automated against (datetime)\n* `last_deleted`: When the host was last deleted (datetime)\n* `automated_counter`: How many times was the host automated (integer)\n* `deleted_counter`: How many times was the host deleted (integer)\n* `deleted`: Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption (boolean)\n* `used_in_inventories`: How many inventories contain this host (integer)\n\n\n\n## Sorting\n\nTo specify that host metrics are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
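For the `/api/v2/host_metric_summary_monthly/` list operation above, one plausible query; ordering on the `date` field from the results list is an assumption, since the spec only documents `order_by` generically (placeholder host and token):

    # Monthly license-consumption summaries, most recent month first.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://controller.example.com/api/v2/host_metric_summary_monthly/?order_by=-date"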
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_host_metrics_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/HostMetric" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/host_metrics/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this host metric. (integer)\n* `hostname`: (string)\n* `url`: URL for this host metric. (string)\n* `first_automation`: When the host was first automated against (datetime)\n* `last_automation`: When the host was last automated against (datetime)\n* `last_deleted`: When the host was last deleted (datetime)\n* `automated_counter`: How many times was the host automated (integer)\n* `deleted_counter`: How many times was the host deleted (integer)\n* `deleted`: Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption (boolean)\n* `used_in_inventories`: How many inventories contain this host (integer)\n\n\n\n\n\n# Delete a Host Metric:\n\nMake a DELETE request to this resource to soft-delete this host metric.\n\nA soft deletion will mark the `deleted` field as true and exclude the host\nmetric from license calculations.\nThis may be undone later if the same hostname is automated again afterwards.", + "operationId": "api_host_metrics_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single host metric", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this host metric. (integer)\n* `hostname`: (string)\n* `url`: URL for this host metric. 
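For the `/api/v2/host_metrics/` list operation above, a sketch using the documented search parameter (placeholder host and token; the search term is arbitrary):

    # Metrics for hosts whose text fields match "db", sorted by hostname.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://controller.example.com/api/v2/host_metrics/?search=db&order_by=hostname"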
(string)\n* `first_automation`: When the host was first automated against (datetime)\n* `last_automation`: When the host was last automated against (datetime)\n* `last_deleted`: When the host was last deleted (datetime)\n* `automated_counter`: How many times was the host automated (integer)\n* `deleted_counter`: How many times was the host deleted (integer)\n* `deleted`: Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption (boolean)\n* `used_in_inventories`: How many inventories contain this host (integer)\n\n\n\n\n\n# Delete a Host Metric:\n\nMake a DELETE request to this resource to soft-delete this host metric.\n\nA soft deletion will mark the `deleted` field as true and exclude the host\nmetric from license calculations.\nThis may be undone later if the same hostname is automated again afterwards.", + "operationId": "api_host_metrics_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/HostMetric" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single host metric", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/hosts/": { + "get": { + "description": "hosts.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of hosts\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more host records. \n\n## Results\n\nEach host data structure includes the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. (string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. (datetime)\n\n\n\n## Sorting\n\nTo specify that hosts are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
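For `/api/v2/host_metrics/{id}/` above, a sketch of the soft delete the spec describes (placeholder host, token, and metric ID):

    # Soft-delete metric 42: sets deleted=true and excludes the host from
    # license calculations; it is undone if the hostname is automated again.
    curl -s -X DELETE -H "Authorization: Bearer $TOKEN" \
      "https://controller.example.com/api/v2/host_metrics/42/"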
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "ansible_facts_modified": null, + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "enabled": true, + "has_active_failures": false, + "has_inventory_sources": false, + "id": 1, + "instance_id": "", + "inventory": 1, + "last_job": null, + "last_job_host_summary": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "host1", + "related": { + "activity_stream": "/api/v2/hosts/1/activity_stream/", + "ad_hoc_command_events": "/api/v2/hosts/1/ad_hoc_command_events/", + "ad_hoc_commands": "/api/v2/hosts/1/ad_hoc_commands/", + "all_groups": "/api/v2/hosts/1/all_groups/", + "ansible_facts": "/api/v2/hosts/1/ansible_facts/", + "groups": "/api/v2/hosts/1/groups/", + "inventory": "/api/v2/inventories/1/", + "inventory_sources": "/api/v2/hosts/1/inventory_sources/", + "job_events": "/api/v2/hosts/1/job_events/", + "job_host_summaries": "/api/v2/hosts/1/job_host_summaries/", + "smart_inventories": "/api/v2/hosts/1/smart_inventories/", + "variable_data": "/api/v2/hosts/1/variable_data/" + }, + "summary_fields": { + "groups": { + "count": 2, + "results": [ + { + "id": 1, + "name": "g1" + }, + { + "id": 2, + "name": "g2" + } + ] + }, + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "recent_jobs": [], + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "host", + "url": "/api/v2/hosts/1/", + "variables": "" + } + ] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Host" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "hosts.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": 
null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of hosts\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more host records. \n\n## Results\n\nEach host data structure includes the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. (string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. (datetime)\n\n\n\n## Sorting\n\nTo specify that hosts are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Host" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Host" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/hosts/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. 
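For the `/api/v2/hosts/` create operation above, a sketch supplying `name` and `inventory` (the fields marked required in the host update notes below) plus `variables` as a JSON-encoded string (placeholder host, token, and inventory ID):

    # Create a host in inventory 1 with a connection variable.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" \
      -H "Content-Type: application/json" \
      -d '{"name": "host2", "inventory": 1, "variables": "{\"ansible_host\": \"192.0.2.10\"}"}' \
      "https://controller.example.com/api/v2/hosts/"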
(string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. (datetime)\n\n\n\n\n\n# Update a Host:\n\nMake a PUT or PATCH request to this resource to update this\nhost. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this host. (string, required)\n* `description`: Optional description of this host. (string, default=`\"\"`)\n* `inventory`: (id, required)\n* `enabled`: Is this host online and available for running jobs? (boolean, default=`True`)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string, default=`\"\"`)\n* `variables`: Host variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Host:\n\nMake a DELETE request to this resource to delete this host.", + "operationId": "api_hosts_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single host", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. (string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. (datetime)\n\n\n\n\n\n# Update a Host:\n\nMake a PUT or PATCH request to this resource to update this\nhost. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this host. (string, required)\n* `description`: Optional description of this host. (string, default=`\"\"`)\n* `inventory`: (id, required)\n* `enabled`: Is this host online and available for running jobs? (boolean, default=`True`)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string, default=`\"\"`)\n* `variables`: Host variables in JSON or YAML format. 
(json, default=``)\n\n\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Host:\n\nMake a DELETE request to this resource to delete this host.", + "operationId": "api_hosts_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Host" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single host", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. (string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. (datetime)\n\n\n\n\n\n# Update a Host:\n\nMake a PUT or PATCH request to this resource to update this\nhost. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this host. (string, required)\n* `description`: Optional description of this host. (string, default=`\"\"`)\n* `inventory`: (id, required)\n* `enabled`: Is this host online and available for running jobs? (boolean, default=`True`)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string, default=`\"\"`)\n* `variables`: Host variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Host:\n\nMake a DELETE request to this resource to delete this host.", + "operationId": "api_hosts_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Host" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Host" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single host", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. (string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. (datetime)\n\n\n\n\n\n# Update a Host:\n\nMake a PUT or PATCH request to this resource to update this\nhost. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this host. (string, required)\n* `description`: Optional description of this host. (string, default=`\"\"`)\n* `inventory`: (id, required)\n* `enabled`: Is this host online and available for running jobs? (boolean, default=`True`)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string, default=`\"\"`)\n* `variables`: Host variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Host:\n\nMake a DELETE request to this resource to delete this host.", + "operationId": "api_hosts_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "description": "Hello world", + "name": "New name" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "ansible_facts_modified": null, + "created": "2018-02-01T08:00:00.000000Z", + "description": "Hello world", + "enabled": true, + "has_active_failures": false, + "has_inventory_sources": false, + "id": 1, + "instance_id": "", + "inventory": 1, + "last_job": null, + "last_job_host_summary": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "New name", + "related": { + "activity_stream": "/api/v2/hosts/1/activity_stream/", + "ad_hoc_command_events": "/api/v2/hosts/1/ad_hoc_command_events/", + "ad_hoc_commands": "/api/v2/hosts/1/ad_hoc_commands/", + "all_groups": "/api/v2/hosts/1/all_groups/", + "ansible_facts": "/api/v2/hosts/1/ansible_facts/", + "groups": "/api/v2/hosts/1/groups/", + "inventory": "/api/v2/inventories/1/", + "inventory_sources": "/api/v2/hosts/1/inventory_sources/", + "job_events": "/api/v2/hosts/1/job_events/", + "job_host_summaries": "/api/v2/hosts/1/job_host_summaries/", + "smart_inventories": "/api/v2/hosts/1/smart_inventories/", + "variable_data": "/api/v2/hosts/1/variable_data/" + }, + "summary_fields": { + "groups": { + "count": 1, + "results": [ + { + "id": 1, + "name": "single-group" + } + ] + }, + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "recent_jobs": [], + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "host", + "url": "/api/v2/hosts/1/", + "variables": "" + } + }, + "schema": { + "$ref": 
"#/definitions/Host" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single host", + "tags": [ + "api" + ] + } + }, + "/api/v2/hosts/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\nhost.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/hosts/{id}/ad_hoc_command_events/": { + "get": { + "description": "ad hoc command events associated with the selected\nhost.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of ad hoc command events\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more ad hoc command event records. \n\n## Results\n\nEach ad hoc command event data structure includes the following fields:\n\n* `id`: Database ID for this ad hoc command event. (integer)\n* `type`: Data type for this ad hoc command event. (choice)\n* `url`: URL for this ad hoc command event. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this ad hoc command event was created. (datetime)\n* `modified`: Timestamp when this ad hoc command event was last modified. 
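For the `/api/v2/hosts/{id}/activity_stream/` list operation above, a sketch ordering on the documented `timestamp` field (placeholder host, token, and host ID):

    # Ten most recent activity-stream entries that touched host 1.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://controller.example.com/api/v2/hosts/1/activity_stream/?order_by=-timestamp&page_size=10"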
(datetime)\n* `ad_hoc_command`: (id)\n* `event`: (choice)\n - `runner_on_failed`: Host Failed\n - `runner_on_ok`: Host OK\n - `runner_on_unreachable`: Host Unreachable\n - `runner_on_skipped`: Host Skipped\n - `debug`: Debug\n - `verbose`: Verbose\n - `deprecated`: Deprecated\n - `warning`: Warning\n - `system_warning`: System Warning\n - `error`: Error\n* `counter`: (integer)\n* `event_display`: (string)\n* `event_data`: (json)\n* `failed`: (boolean)\n* `changed`: (boolean)\n* `uuid`: (string)\n* `host`: (id)\n* `host_name`: (string)\n* `stdout`: (string)\n* `start_line`: (integer)\n* `end_line`: (integer)\n* `verbosity`: (integer)\n\n\n\n## Sorting\n\nTo specify that ad hoc command events are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_ad_hoc_command_events_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/AdHocCommandEvent" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/hosts/{id}/ad_hoc_commands/": { + "get": { + "description": "ad hoc commands associated with the selected\nhost.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of ad hoc commands\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more ad hoc command records. 
\n\n## Results\n\nEach ad hoc command data structure includes the following fields:\n\n* `id`: Database ID for this ad hoc command. (integer)\n* `type`: Data type for this ad hoc command. (choice)\n* `url`: URL for this ad hoc command. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this ad hoc command was created. (datetime)\n* `modified`: Timestamp when this ad hoc command was last modified. (datetime)\n* `name`: Name of this ad hoc command. (string)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `limit`: (string)\n* `credential`: (id)\n* `module_name`: (choice)\n - `command`\n - `shell`\n - `yum`\n - `apt`\n - `apt_key`\n - `apt_repository`\n - `apt_rpm`\n - `service`\n - `group`\n - `user`\n - `mount`\n - `ping`\n - `selinux`\n - `setup`\n - `win_ping`\n - `win_service`\n - `win_updates`\n - `win_group`\n - `win_user`\n* `module_args`: (string)\n* `forks`: (integer)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (string)\n* `become_enabled`: (boolean)\n* `diff_mode`: (boolean)\n\n\n\n## Sorting\n\nTo specify that ad hoc commands are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_ad_hoc_commands_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/AdHocCommandList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "ad hoc commands associated with the selected\nhost.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of ad hoc commands\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more ad hoc command records. \n\n## Results\n\nEach ad hoc command data structure includes the following fields:\n\n* `id`: Database ID for this ad hoc command. (integer)\n* `type`: Data type for this ad hoc command. (choice)\n* `url`: URL for this ad hoc command. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this ad hoc command was created. (datetime)\n* `modified`: Timestamp when this ad hoc command was last modified. (datetime)\n* `name`: Name of this ad hoc command. (string)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. 
(datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `limit`: (string)\n* `credential`: (id)\n* `module_name`: (choice)\n - `command`\n - `shell`\n - `yum`\n - `apt`\n - `apt_key`\n - `apt_repository`\n - `apt_rpm`\n - `service`\n - `group`\n - `user`\n - `mount`\n - `ping`\n - `selinux`\n - `setup`\n - `win_ping`\n - `win_service`\n - `win_updates`\n - `win_group`\n - `win_user`\n* `module_args`: (string)\n* `forks`: (integer)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (string)\n* `become_enabled`: (boolean)\n* `diff_mode`: (boolean)\n\n\n\n## Sorting\n\nTo specify that ad hoc commands are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_ad_hoc_commands_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/AdHocCommandList" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/AdHocCommandList" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/hosts/{id}/all_groups/": { + "get": { + "description": "groups of which the selected\nhost is directly or indirectly a member.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_all_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Group" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of all", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/hosts/{id}/ansible_facts/": { + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. 
(string)", + "operationId": "api_hosts_ansible_facts_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/AnsibleFacts" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single host", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/hosts/{id}/groups/": { + "get": { + "description": "groups associated with the selected\nhost.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Group" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "groups associated with the selected\nhost.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_groups_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Group" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Group" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/hosts/{id}/inventory_sources/": { + "get": { + "description": "inventory sources associated with the selected\nhost.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventory sources\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory source records. \n\n## Results\n\nEach inventory source data structure includes the following fields:\n\n* `id`: Database ID for this inventory source. (integer)\n* `type`: Data type for this inventory source. (choice)\n* `url`: URL for this inventory source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory source was created. (datetime)\n* `modified`: Timestamp when this inventory source was last modified. (datetime)\n* `name`: Name of this inventory source. (string)\n* `description`: Optional description of this inventory source. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. 
For example if enabled_var="status.power_state"and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `none`: No External Source\n* `execution_environment`: The container image to be used for execution. (id)\n* `inventory`: (id)\n* `update_on_launch`: (boolean)\n* `update_cache_timeout`: (integer)\n* `source_project`: Project containing inventory file used as source. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that inventory sources are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_inventory_sources_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InventorySource" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/hosts/{id}/job_events/": { + "get": { + "description": "job events associated with the selected\nhost.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of job events\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job event records. \n\n## Results\n\nEach job event data structure includes the following fields:\n\n* `id`: Database ID for this job event. (integer)\n* `type`: Data type for this job event. (choice)\n* `url`: URL for this job event. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job event was created. (datetime)\n* `modified`: Timestamp when this job event was last modified. 
(datetime)\n* `job`: (id)\n* `event`: (choice)\n - `runner_on_failed`: Host Failed\n - `runner_on_start`: Host Started\n - `runner_on_ok`: Host OK\n - `runner_on_error`: Host Failure\n - `runner_on_skipped`: Host Skipped\n - `runner_on_unreachable`: Host Unreachable\n - `runner_on_no_hosts`: No Hosts Remaining\n - `runner_on_async_poll`: Host Polling\n - `runner_on_async_ok`: Host Async OK\n - `runner_on_async_failed`: Host Async Failure\n - `runner_item_on_ok`: Item OK\n - `runner_item_on_failed`: Item Failed\n - `runner_item_on_skipped`: Item Skipped\n - `runner_retry`: Host Retry\n - `runner_on_file_diff`: File Difference\n - `playbook_on_start`: Playbook Started\n - `playbook_on_notify`: Running Handlers\n - `playbook_on_include`: Including File\n - `playbook_on_no_hosts_matched`: No Hosts Matched\n - `playbook_on_no_hosts_remaining`: No Hosts Remaining\n - `playbook_on_task_start`: Task Started\n - `playbook_on_vars_prompt`: Variables Prompted\n - `playbook_on_setup`: Gathering Facts\n - `playbook_on_import_for_host`: internal: on Import for Host\n - `playbook_on_not_import_for_host`: internal: on Not Import for Host\n - `playbook_on_play_start`: Play Started\n - `playbook_on_stats`: Playbook Complete\n - `debug`: Debug\n - `verbose`: Verbose\n - `deprecated`: Deprecated\n - `warning`: Warning\n - `system_warning`: System Warning\n - `error`: Error\n* `counter`: (integer)\n* `event_display`: (string)\n* `event_data`: (json)\n* `event_level`: (integer)\n* `failed`: (boolean)\n* `changed`: (boolean)\n* `uuid`: (string)\n* `parent_uuid`: (string)\n* `host`: (id)\n* `host_name`: (string)\n* `playbook`: (string)\n* `play`: (string)\n* `task`: (string)\n* `role`: (string)\n* `stdout`: (string)\n* `start_line`: (integer)\n* `end_line`: (integer)\n* `verbosity`: (integer)\n\n\n\n## Sorting\n\nTo specify that job events are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_job_events_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/JobEvent" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/hosts/{id}/job_host_summaries/": { + "get": { + "description": "job host summaries associated with the selected\nhost.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of job host summaries\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job host summary records. \n\n## Results\n\nEach job host summary data structure includes the following fields:\n\n* `id`: Database ID for this job host summary. (integer)\n* `type`: Data type for this job host summary. (choice)\n* `url`: URL for this job host summary. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job host summary was created. (datetime)\n* `modified`: Timestamp when this job host summary was last modified. (datetime)\n* `job`: (id)\n* `host`: (id)\n* `constructed_host`: Only for jobs run against constructed inventories, this links to the host inside the constructed inventory. 
(id)\n* `host_name`: (string)\n* `changed`: (integer)\n* `dark`: (integer)\n* `failures`: (integer)\n* `ok`: (integer)\n* `processed`: (integer)\n* `skipped`: (integer)\n* `failed`: (boolean)\n* `ignored`: (integer)\n* `rescued`: (integer)\n\n\n\n## Sorting\n\nTo specify that job host summaries are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_job_host_summaries_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/JobHostSummary" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/hosts/{id}/smart_inventories/": { + "get": { + "description": "inventories associated with the selected\nhost.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventories\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory records. \n\n## Results\n\nEach inventory data structure includes the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. 
(datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string)\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. (boolean)\n\n\n\n## Sorting\n\nTo specify that inventories are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request.
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_hosts_smart_inventories_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Inventory" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of inventories associated with the selected host", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/hosts/{id}/variable_data/": { + "get": { + "consumes": [ + "application/json", + "application/yaml" + ], + "description": "host.\n\n\n\n# Update Host Variable Data:\n\nMake a PUT or PATCH request to this resource to update variables defined for a\nhost.", + "operationId": "api_hosts_variable_data_read", + "parameters": [], + "produces": [ + "application/json", + "application/yaml" + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/HostVariableData" + } + } + }, + "summary": "Make a GET request to this resource to retrieve all variables defined for a host", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "consumes": [ + "application/json", + "application/yaml" + ], + "description": "host.\n\n\n\n# Update Host Variable Data:\n\nMake a PUT or PATCH request to this resource to update variables defined for a\nhost.", + "operationId": "api_hosts_variable_data_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/HostVariableData" + } + } + ], + "produces": [ + "application/json", + "application/yaml" + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/HostVariableData" + } + } + }, + "summary": "Make a PATCH request to this resource to update variables defined for a host", + "tags": [ + "api" + ] + }, + "put": { + "consumes": [ + "application/json", + "application/yaml" + ], + "description": "host.\n\n\n\n# Update Host Variable Data:\n\nMake a PUT or PATCH request to this resource to update variables defined for a\nhost.", + "operationId":
"api_hosts_variable_data_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/HostVariableData" + } + } + ], + "produces": [ + "application/json", + "application/yaml" + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/HostVariableData" + } + } + }, + "summary": "Make a PUT request to this resource to update variables defined for a host", + "tags": [ + "api" + ] + } + }, + "/api/v2/instance_groups/": { + "get": { + "description": "instance groups.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group. (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group. (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request.
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_instance_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InstanceGroup" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of instance groups", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "instance groups.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group. (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online.
(integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group. (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_instance_groups_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + }, + "summary": "Make a POST request to this resource to create a new instance group", + "tags": [ + "api" + ] + } + }, + "/api/v2/instance_groups/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group. (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online.
(integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group. (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n\n\n# Update an Instance Group:\n\nMake a PUT or PATCH request to this resource to update this\ninstance group. The following fields may be modified:\n\n\n\n\n\n\n* `name`: Name of this instance group. (string, required)\n\n\n\n\n\n\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer, default=`0`)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer, default=`0`)\n\n\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. (boolean, default=``)\n* `credential`: (id, default=``)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer, default=`0`)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer, default=`0`)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group. (json, default=``)\n* `pod_spec_override`: (string, default=`\"\"`)\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Instance Group:\n\nMake a DELETE request to this resource to delete this instance group.", + "operationId": "api_instance_groups_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + }, + "409": { + "examples": { + "application/json": { + "active_jobs": [ + { + "id": 1, + "type": "job" + }, + { + "id": 2, + "type": "job" + }, + { + "id": 3, + "type": "project_update" + }, + { + "id": 4, + "type": "project_update" + } + ], + "error": "Resource is being used by running jobs." + } + } + } + }, + "summary": "Make a DELETE request to this resource to delete this instance group", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced.
(integer)\n* `jobs_total`: Count of all jobs that target this instance group. (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group. (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n\n\n# Update an Instance Group:\n\nMake a PUT or PATCH request to this resource to update this\ninstance group. The following fields may be modified:\n\n\n\n\n\n\n* `name`: Name of this instance group. (string, required)\n\n\n\n\n\n\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer, default=`0`)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer, default=`0`)\n\n\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. (boolean, default=``)\n* `credential`: (id, default=``)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer, default=`0`)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer, default=`0`)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group. (json, default=``)\n* `pod_spec_override`: (string, default=`\"\"`)\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Instance Group:\n\nMake a DELETE request to this resource to delete this instance group.", + "operationId": "api_instance_groups_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a single instance group", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified.
(datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assign to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n\n\n# Update an Instance Group:\n\nMake a PUT or PATCH request to this resource to update this\ninstance group. The following fields may be modified:\n\n\n\n\n\n\n* `name`: Name of this instance group. (string, required)\n\n\n\n\n\n\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer, default=`0`)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer, default=`0`)\n\n\n* `is_container_group`: Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster. (boolean, default=``)\n* `credential`: (id, default=``)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer, default=`0`)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assign to this group when new instances come online. 
(integer, default=`0`)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json, default=``)\n* `pod_spec_override`: (string, default=`\"\"`)\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Instance Group:\n\nMake a DELETE request to this resource to delete this instance group.", + "operationId": "api_instance_groups_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "name": "foobar" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 2, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "foobar", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/2/access_list/", + "instances": "/api/v2/instance_groups/2/instances/", + "jobs": "/api/v2/instance_groups/2/jobs/", + "object_roles": "/api/v2/instance_groups/2/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 43, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 45, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 44, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/2/" + } + }, + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + }, + "400": { + "examples": { + "application/json": { + "policy_instance_list": [ + "Containerized instances may not be managed via the API" + ] + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single instance group", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster. 
(boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assign to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n\n\n# Update an Instance Group:\n\nMake a PUT or PATCH request to this resource to update this\ninstance group. The following fields may be modified:\n\n\n\n\n\n\n* `name`: Name of this instance group. (string, required)\n\n\n\n\n\n\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer, default=`0`)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer, default=`0`)\n\n\n* `is_container_group`: Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster. (boolean, default=``)\n* `credential`: (id, default=``)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer, default=`0`)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assign to this group when new instances come online. (integer, default=`0`)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json, default=``)\n* `pod_spec_override`: (string, default=`\"\"`)\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Instance Group:\n\nMake a DELETE request to this resource to delete this instance group.", + "operationId": "api_instance_groups_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single instance group", + "tags": [ + "api" + ] + } + }, + "/api/v2/instance_groups/{id}/access_list/": { + "get": { + "description": "users.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. 
(datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_instance_groups_access_list_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ResourceAccessListElement" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/instance_groups/{id}/instances/": { + "get": { + "description": "instances associated with the selected\ninstance group.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instances\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance records. \n\n## Results\n\nEach instance data structure includes the following fields:\n\n* `id`: Database ID for this instance. 
(integer)\n* `hostname`: (string)\n* `type`: Data type for this instance. (choice)\n* `url`: URL for this instance. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `uuid`: (string)\n* `created`: Timestamp when this instance was created. (datetime)\n* `modified`: Timestamp when this instance was last modified. (datetime)\n* `last_seen`: Last time instance ran its heartbeat task for main cluster nodes. Last known connection to receptor mesh for execution nodes. (datetime)\n* `health_check_started`: The last time a health check was initiated on this instance. (datetime)\n* `health_check_pending`: (field)\n* `last_health_check`: Last time a health check was run on this instance to refresh cpu, memory, and capacity. (datetime)\n* `errors`: Any error details from the last health check. (string)\n* `capacity_adjustment`: (decimal)\n* `version`: (string)\n* `capacity`: (integer)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: Count of jobs in the running or waiting state that are targeted for this instance (integer)\n* `jobs_total`: Count of all jobs that target this instance (integer)\n* `cpu`: (decimal)\n* `memory`: Total system memory of this instance in bytes. (integer)\n* `cpu_capacity`: (integer)\n* `mem_capacity`: (integer)\n* `enabled`: (boolean)\n* `managed_by_policy`: (boolean)\n* `node_type`: Role that this node plays in the mesh. (choice)\n    - `control`: Control plane node\n    - `execution`: Execution plane node\n    - `hybrid`: Controller and execution\n    - `hop`: Message-passing node, no execution capability\n* `node_state`: Indicates the current life cycle stage of this instance. (choice)\n    - `provisioning`: Provisioning\n    - `provision-fail`: Provisioning Failure\n    - `installed`: Installed\n    - `ready`: Ready\n    - `unavailable`: Unavailable\n    - `deprovisioning`: De-provisioning\n    - `deprovision-fail`: De-provisioning Failure\n* `ip_address`: (string)\n* `listener_port`: Port that Receptor will listen for incoming connections on. (integer)\n* `peers`: (field)\n* `peers_from_control_nodes`: If True, control plane cluster nodes should automatically peer to it. (boolean)\n\n\n\n## Sorting\n\nTo specify that instances are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n    ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n    ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n    ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request.
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_instance_groups_instances_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Instance" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "instances associated with the selected\ninstance group.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instances\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance records. \n\n## Results\n\nEach instance data structure includes the following fields:\n\n* `id`: Database ID for this instance. (integer)\n* `hostname`: (string)\n* `type`: Data type for this instance. (choice)\n* `url`: URL for this instance. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `uuid`: (string)\n* `created`: Timestamp when this instance was created. (datetime)\n* `modified`: Timestamp when this instance was last modified. (datetime)\n* `last_seen`: Last time instance ran its heartbeat task for main cluster nodes. Last known connection to receptor mesh for execution nodes. (datetime)\n* `health_check_started`: The last time a health check was initiated on this instance. (datetime)\n* `health_check_pending`: (field)\n* `last_health_check`: Last time a health check was ran on this instance to refresh cpu, memory, and capacity. (datetime)\n* `errors`: Any error details from the last health check. 
(string)\n* `capacity_adjustment`: (decimal)\n* `version`: (string)\n* `capacity`: (integer)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: Count of jobs in the running or waiting state that are targeted for this instance (integer)\n* `jobs_total`: Count of all jobs that target this instance (integer)\n* `cpu`: (decimal)\n* `memory`: Total system memory of this instance in bytes. (integer)\n* `cpu_capacity`: (integer)\n* `mem_capacity`: (integer)\n* `enabled`: (boolean)\n* `managed_by_policy`: (boolean)\n* `node_type`: Role that this node plays in the mesh. (choice)\n - `control`: Control plane node\n - `execution`: Execution plane node\n - `hybrid`: Controller and execution\n - `hop`: Message-passing node, no execution capability\n* `node_state`: Indicates the current life cycle stage of this instance. (choice)\n - `provisioning`: Provisioning\n - `provision-fail`: Provisioning Failure\n - `installed`: Installed\n - `ready`: Ready\n - `unavailable`: Unavailable\n - `deprovisioning`: De-provisioning\n - `deprovision-fail`: De-provisioning Failure\n* `ip_address`: (string)\n* `listener_port`: Port that Receptor will listen for incoming connections on. (integer)\n* `peers`: (field)\n* `peers_from_control_nodes`: If True, control plane cluster nodes should automatically peer to it. (boolean)\n\n\n\n## Sorting\n\nTo specify that instances are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_instance_groups_instances_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "disassociate": true, + "id": 1 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Instance" + } + }, + "400": { + "examples": { + "application/json": { + "msg": "Cannot disassociate hybrid node hybrid_node from controlplane." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/instance_groups/{id}/jobs/": { + "get": { + "description": "unified jobs associated with the selected\ninstance group.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of unified jobs\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more unified job records. 
\n\n## Results\n\nEach unified job data structure includes the following fields:\n\n* `id`: Database ID for this unified job. (integer)\n* `type`: Data type for this unified job. (choice)\n* `url`: URL for this unified job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this unified job was created. (datetime)\n* `modified`: Timestamp when this unified job was last modified. (datetime)\n* `name`: Name of this unified job. (string)\n* `description`: Optional description of this unified job. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n\n\n\n## Sorting\n\nTo specify that unified jobs are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n    ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n    ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n    ?related__search=findme", + "operationId": "api_instance_groups_jobs_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/UnifiedJobList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/instance_groups/{id}/object_roles/": { + "get": { + "description": "roles associated with the selected\ninstance group.\n\nThe resulting data structure contains:\n\n    {\n        \"count\": 99,\n        \"next\": null,\n        \"previous\": null,\n        \"results\": [\n            ...\n        ]\n    }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records. \n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n    ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n    ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n    ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request.
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_instance_groups_object_roles_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Role" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/instances/": { + "get": { + "description": "instances.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instances\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance records. \n\n## Results\n\nEach instance data structure includes the following fields:\n\n* `id`: Database ID for this instance. (integer)\n* `hostname`: (string)\n* `type`: Data type for this instance. (choice)\n* `url`: URL for this instance. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `uuid`: (string)\n* `created`: Timestamp when this instance was created. (datetime)\n* `modified`: Timestamp when this instance was last modified. (datetime)\n* `last_seen`: Last time instance ran its heartbeat task for main cluster nodes. Last known connection to receptor mesh for execution nodes. (datetime)\n* `health_check_started`: The last time a health check was initiated on this instance. (datetime)\n* `health_check_pending`: (field)\n* `last_health_check`: Last time a health check was ran on this instance to refresh cpu, memory, and capacity. (datetime)\n* `errors`: Any error details from the last health check. 
(string)\n* `capacity_adjustment`: (decimal)\n* `version`: (string)\n* `capacity`: (integer)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: Count of jobs in the running or waiting state that are targeted for this instance (integer)\n* `jobs_total`: Count of all jobs that target this instance (integer)\n* `cpu`: (decimal)\n* `memory`: Total system memory of this instance in bytes. (integer)\n* `cpu_capacity`: (integer)\n* `mem_capacity`: (integer)\n* `enabled`: (boolean)\n* `managed_by_policy`: (boolean)\n* `node_type`: Role that this node plays in the mesh. (choice)\n - `control`: Control plane node\n - `execution`: Execution plane node\n - `hybrid`: Controller and execution\n - `hop`: Message-passing node, no execution capability\n* `node_state`: Indicates the current life cycle stage of this instance. (choice)\n - `provisioning`: Provisioning\n - `provision-fail`: Provisioning Failure\n - `installed`: Installed\n - `ready`: Ready\n - `unavailable`: Unavailable\n - `deprovisioning`: De-provisioning\n - `deprovision-fail`: De-provisioning Failure\n* `ip_address`: (string)\n* `listener_port`: Port that Receptor will listen for incoming connections on. (integer)\n* `peers`: (field)\n* `peers_from_control_nodes`: If True, control plane cluster nodes should automatically peer to it. (boolean)\n\n\n\n## Sorting\n\nTo specify that instances are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_instances_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Instance" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "instances.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instances\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance records. \n\n## Results\n\nEach instance data structure includes the following fields:\n\n* `id`: Database ID for this instance. (integer)\n* `hostname`: (string)\n* `type`: Data type for this instance. (choice)\n* `url`: URL for this instance. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `uuid`: (string)\n* `created`: Timestamp when this instance was created. (datetime)\n* `modified`: Timestamp when this instance was last modified. (datetime)\n* `last_seen`: Last time instance ran its heartbeat task for main cluster nodes. Last known connection to receptor mesh for execution nodes. (datetime)\n* `health_check_started`: The last time a health check was initiated on this instance. (datetime)\n* `health_check_pending`: (field)\n* `last_health_check`: Last time a health check was ran on this instance to refresh cpu, memory, and capacity. (datetime)\n* `errors`: Any error details from the last health check. 
(string)\n* `capacity_adjustment`: (decimal)\n* `version`: (string)\n* `capacity`: (integer)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: Count of jobs in the running or waiting state that are targeted for this instance (integer)\n* `jobs_total`: Count of all jobs that target this instance (integer)\n* `cpu`: (decimal)\n* `memory`: Total system memory of this instance in bytes. (integer)\n* `cpu_capacity`: (integer)\n* `mem_capacity`: (integer)\n* `enabled`: (boolean)\n* `managed_by_policy`: (boolean)\n* `node_type`: Role that this node plays in the mesh. (choice)\n - `control`: Control plane node\n - `execution`: Execution plane node\n - `hybrid`: Controller and execution\n - `hop`: Message-passing node, no execution capability\n* `node_state`: Indicates the current life cycle stage of this instance. (choice)\n - `provisioning`: Provisioning\n - `provision-fail`: Provisioning Failure\n - `installed`: Installed\n - `ready`: Ready\n - `unavailable`: Unavailable\n - `deprovisioning`: De-provisioning\n - `deprovision-fail`: De-provisioning Failure\n* `ip_address`: (string)\n* `listener_port`: Port that Receptor will listen for incoming connections on. (integer)\n* `peers`: (field)\n* `peers_from_control_nodes`: If True, control plane cluster nodes should automatically peer to it. (boolean)\n\n\n\n## Sorting\n\nTo specify that instances are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_instances_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "hostname": "abc7", + "listener_port": 6789, + "node_type": "execution", + "peers_from_control_nodes": false + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "capacity": 100, + "capacity_adjustment": "1.00", + "consumed_capacity": 0, + "cpu": "0.0", + "cpu_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "enabled": true, + "errors": "", + "health_check_pending": false, + "health_check_started": null, + "hostname": "abc7", + "id": 6, + "ip_address": "", + "jobs_running": 0, + "jobs_total": 0, + "last_health_check": null, + "last_seen": null, + "listener_port": 6789, + "managed_by_policy": true, + "mem_capacity": 0, + "memory": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "node_state": "installed", + "node_type": "execution", + "peers": [], + "peers_from_control_nodes": false, + "percent_capacity_remaining": 100.0, + "related": { + "health_check": "/api/v2/instances/6/health_check/", + "install_bundle": "/api/v2/instances/6/install_bundle/", + "instance_groups": "/api/v2/instances/6/instance_groups/", + "jobs": "/api/v2/instances/6/jobs/", + "peers": "/api/v2/instances/6/peers/" + }, + "summary_fields": { + "user_capabilities": { + "edit": false + } + }, + "type": "instance", + "url": "/api/v2/instances/6/", + "uuid": "00000000-0000-0000-0000-000000000000", + "version": "" + } + }, + "schema": { + "$ref": "#/definitions/Instance" + } + }, + "400": { + "examples": { + "application/json": { + "__all__": [ + "Field listener_port must be a valid integer when peers_from_control_nodes is enabled." + ] + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/instances/{id}/": { + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this instance. (integer)\n* `hostname`: (string)\n* `type`: Data type for this instance. (choice)\n* `url`: URL for this instance. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `uuid`: (string)\n* `created`: Timestamp when this instance was created. (datetime)\n* `modified`: Timestamp when this instance was last modified. (datetime)\n* `last_seen`: Last time instance ran its heartbeat task for main cluster nodes. Last known connection to receptor mesh for execution nodes. (datetime)\n* `health_check_started`: The last time a health check was initiated on this instance. (datetime)\n* `health_check_pending`: (field)\n* `last_health_check`: Last time a health check was ran on this instance to refresh cpu, memory, and capacity. (datetime)\n* `errors`: Any error details from the last health check. 
(string)\n* `capacity_adjustment`: (decimal)\n* `version`: (string)\n* `capacity`: (integer)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: Count of jobs in the running or waiting state that are targeted for this instance (integer)\n* `jobs_total`: Count of all jobs that target this instance (integer)\n* `cpu`: (decimal)\n* `memory`: Total system memory of this instance in bytes. (integer)\n* `cpu_capacity`: (integer)\n* `mem_capacity`: (integer)\n* `enabled`: (boolean)\n* `managed_by_policy`: (boolean)\n* `node_type`: Role that this node plays in the mesh. (choice)\n - `control`: Control plane node\n - `execution`: Execution plane node\n - `hybrid`: Controller and execution\n - `hop`: Message-passing node, no execution capability\n* `node_state`: Indicates the current life cycle stage of this instance. (choice)\n - `provisioning`: Provisioning\n - `provision-fail`: Provisioning Failure\n - `installed`: Installed\n - `ready`: Ready\n - `unavailable`: Unavailable\n - `deprovisioning`: De-provisioning\n - `deprovision-fail`: De-provisioning Failure\n* `ip_address`: (string)\n* `listener_port`: Port that Receptor will listen for incoming connections on. (integer)\n* `peers`: (field)\n* `peers_from_control_nodes`: If True, control plane cluster nodes should automatically peer to it. (boolean)\n\n\n\n\n\n# Update an Instance:\n\nMake a PUT or PATCH request to this resource to update this\ninstance. The following fields may be modified:\n\n\n\n* `hostname`: (string, required)\n\n\n\n\n\n\n\n\n\n\n\n\n* `capacity_adjustment`: (decimal, default=`1`)\n\n\n\n\n\n\n\n\n\n\n* `enabled`: (boolean, default=`True`)\n* `managed_by_policy`: (boolean, default=`True`)\n* `node_type`: Role that this node plays in the mesh. (choice)\n - `control`: Control plane node\n - `execution`: Execution plane node (default)\n - `hybrid`: Controller and execution\n - `hop`: Message-passing node, no execution capability\n* `node_state`: Indicates the current life cycle stage of this instance. (choice)\n - `provisioning`: Provisioning\n - `provision-fail`: Provisioning Failure\n - `installed`: Installed (default)\n - `ready`: Ready\n - `unavailable`: Unavailable\n - `deprovisioning`: De-provisioning\n - `deprovision-fail`: De-provisioning Failure\n\n* `listener_port`: Port that Receptor will listen for incoming connections on. (integer, default=`None`)\n* `peers`: (field, default=`None`)\n* `peers_from_control_nodes`: If True, control plane cluster nodes should automatically peer to it. (boolean, default=`False`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.", + "operationId": "api_instances_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Instance" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single instance", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this instance. (integer)\n* `hostname`: (string)\n* `type`: Data type for this instance. (choice)\n* `url`: URL for this instance. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `uuid`: (string)\n* `created`: Timestamp when this instance was created. (datetime)\n* `modified`: Timestamp when this instance was last modified. (datetime)\n* `last_seen`: Last time instance ran its heartbeat task for main cluster nodes. Last known connection to receptor mesh for execution nodes. (datetime)\n* `health_check_started`: The last time a health check was initiated on this instance. (datetime)\n* `health_check_pending`: (field)\n* `last_health_check`: Last time a health check was ran on this instance to refresh cpu, memory, and capacity. (datetime)\n* `errors`: Any error details from the last health check. (string)\n* `capacity_adjustment`: (decimal)\n* `version`: (string)\n* `capacity`: (integer)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: Count of jobs in the running or waiting state that are targeted for this instance (integer)\n* `jobs_total`: Count of all jobs that target this instance (integer)\n* `cpu`: (decimal)\n* `memory`: Total system memory of this instance in bytes. (integer)\n* `cpu_capacity`: (integer)\n* `mem_capacity`: (integer)\n* `enabled`: (boolean)\n* `managed_by_policy`: (boolean)\n* `node_type`: Role that this node plays in the mesh. (choice)\n - `control`: Control plane node\n - `execution`: Execution plane node\n - `hybrid`: Controller and execution\n - `hop`: Message-passing node, no execution capability\n* `node_state`: Indicates the current life cycle stage of this instance. (choice)\n - `provisioning`: Provisioning\n - `provision-fail`: Provisioning Failure\n - `installed`: Installed\n - `ready`: Ready\n - `unavailable`: Unavailable\n - `deprovisioning`: De-provisioning\n - `deprovision-fail`: De-provisioning Failure\n* `ip_address`: (string)\n* `listener_port`: Port that Receptor will listen for incoming connections on. (integer)\n* `peers`: (field)\n* `peers_from_control_nodes`: If True, control plane cluster nodes should automatically peer to it. (boolean)\n\n\n\n\n\n# Update an Instance:\n\nMake a PUT or PATCH request to this resource to update this\ninstance. The following fields may be modified:\n\n\n\n* `hostname`: (string, required)\n\n\n\n\n\n\n\n\n\n\n\n\n* `capacity_adjustment`: (decimal, default=`1`)\n\n\n\n\n\n\n\n\n\n\n* `enabled`: (boolean, default=`True`)\n* `managed_by_policy`: (boolean, default=`True`)\n* `node_type`: Role that this node plays in the mesh. (choice)\n - `control`: Control plane node\n - `execution`: Execution plane node (default)\n - `hybrid`: Controller and execution\n - `hop`: Message-passing node, no execution capability\n* `node_state`: Indicates the current life cycle stage of this instance. (choice)\n - `provisioning`: Provisioning\n - `provision-fail`: Provisioning Failure\n - `installed`: Installed (default)\n - `ready`: Ready\n - `unavailable`: Unavailable\n - `deprovisioning`: De-provisioning\n - `deprovision-fail`: De-provisioning Failure\n\n* `listener_port`: Port that Receptor will listen for incoming connections on. (integer, default=`None`)\n* `peers`: (field, default=`None`)\n* `peers_from_control_nodes`: If True, control plane cluster nodes should automatically peer to it. 
(boolean, default=`False`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.", + "operationId": "api_instances_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "peers_from_control_nodes": true + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "capacity": 0, + "capacity_adjustment": "1.00", + "consumed_capacity": 0, + "cpu": "0.0", + "cpu_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "enabled": true, + "errors": "", + "health_check_pending": false, + "health_check_started": null, + "hostname": "hop2", + "id": 3, + "ip_address": "", + "jobs_running": 0, + "jobs_total": 0, + "last_health_check": null, + "last_seen": null, + "listener_port": 6789, + "managed_by_policy": true, + "mem_capacity": 0, + "memory": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "node_state": "ready", + "node_type": "hop", + "peers": [], + "peers_from_control_nodes": true, + "percent_capacity_remaining": 0.0, + "related": { + "install_bundle": "/api/v2/instances/3/install_bundle/", + "instance_groups": "/api/v2/instances/3/instance_groups/", + "jobs": "/api/v2/instances/3/jobs/", + "peers": "/api/v2/instances/3/peers/" + }, + "summary_fields": { + "links": [], + "user_capabilities": { + "edit": false + } + }, + "type": "instance", + "url": "/api/v2/instances/3/", + "uuid": "00000000-0000-0000-0000-000000000000", + "version": "" + } + }, + "schema": { + "$ref": "#/definitions/Instance" + } + }, + "400": { + "examples": { + "application/json": { + "node_state": [ + "Can only change instances to the 'deprovisioning' state." + ] + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single instance", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this instance. (integer)\n* `hostname`: (string)\n* `type`: Data type for this instance. (choice)\n* `url`: URL for this instance. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `uuid`: (string)\n* `created`: Timestamp when this instance was created. (datetime)\n* `modified`: Timestamp when this instance was last modified. (datetime)\n* `last_seen`: Last time instance ran its heartbeat task for main cluster nodes. Last known connection to receptor mesh for execution nodes. (datetime)\n* `health_check_started`: The last time a health check was initiated on this instance. (datetime)\n* `health_check_pending`: (field)\n* `last_health_check`: Last time a health check was ran on this instance to refresh cpu, memory, and capacity. (datetime)\n* `errors`: Any error details from the last health check. (string)\n* `capacity_adjustment`: (decimal)\n* `version`: (string)\n* `capacity`: (integer)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: Count of jobs in the running or waiting state that are targeted for this instance (integer)\n* `jobs_total`: Count of all jobs that target this instance (integer)\n* `cpu`: (decimal)\n* `memory`: Total system memory of this instance in bytes. 
(integer)\n* `cpu_capacity`: (integer)\n* `mem_capacity`: (integer)\n* `enabled`: (boolean)\n* `managed_by_policy`: (boolean)\n* `node_type`: Role that this node plays in the mesh. (choice)\n - `control`: Control plane node\n - `execution`: Execution plane node\n - `hybrid`: Controller and execution\n - `hop`: Message-passing node, no execution capability\n* `node_state`: Indicates the current life cycle stage of this instance. (choice)\n - `provisioning`: Provisioning\n - `provision-fail`: Provisioning Failure\n - `installed`: Installed\n - `ready`: Ready\n - `unavailable`: Unavailable\n - `deprovisioning`: De-provisioning\n - `deprovision-fail`: De-provisioning Failure\n* `ip_address`: (string)\n* `listener_port`: Port that Receptor will listen for incoming connections on. (integer)\n* `peers`: (field)\n* `peers_from_control_nodes`: If True, control plane cluster nodes should automatically peer to it. (boolean)\n\n\n\n\n\n# Update an Instance:\n\nMake a PUT or PATCH request to this resource to update this\ninstance. The following fields may be modified:\n\n\n\n* `hostname`: (string, required)\n\n\n\n\n\n\n\n\n\n\n\n\n* `capacity_adjustment`: (decimal, default=`1`)\n\n\n\n\n\n\n\n\n\n\n* `enabled`: (boolean, default=`True`)\n* `managed_by_policy`: (boolean, default=`True`)\n* `node_type`: Role that this node plays in the mesh. (choice)\n - `control`: Control plane node\n - `execution`: Execution plane node (default)\n - `hybrid`: Controller and execution\n - `hop`: Message-passing node, no execution capability\n* `node_state`: Indicates the current life cycle stage of this instance. (choice)\n - `provisioning`: Provisioning\n - `provision-fail`: Provisioning Failure\n - `installed`: Installed (default)\n - `ready`: Ready\n - `unavailable`: Unavailable\n - `deprovisioning`: De-provisioning\n - `deprovision-fail`: De-provisioning Failure\n\n* `listener_port`: Port that Receptor will listen for incoming connections on. (integer, default=`None`)\n* `peers`: (field, default=`None`)\n* `peers_from_control_nodes`: If True, control plane cluster nodes should automatically peer to it. 
(boolean, default=`False`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.", + "operationId": "api_instances_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Instance" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Instance" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single instance", + "tags": [ + "api" + ] + } + }, + "/api/v2/instances/{id}/health_check/": { + "get": { + "description": "Instance fields affected by the health check are shown in this view.\nFundamentally, health checks require running code on the machine in question.\n\n - For instances with `node_type` of \"control\" or \"hybrid\", health checks are\nperformed as part of a periodic task that runs in the background.\n - For instances with `node_type` of \"execution\", health checks are done by submitting\na work unit through the receptor mesh.\n\nIf run through the receptor mesh, the invoked command is:\n\n```\nansible-runner worker --worker-info\n```\n\nFor execution nodes, these checks are _not_ performed on a regular basis.\nHealth checks against functional nodes will be run when the node is first discovered.\nHealth checks against nodes with errors will be repeated at a reduced frequency.\n\n\n\n\n# Manually Initiate a Health Check\nFor purposes of error remediation or debugging, a health check can be\nmanually initiated by making a POST request to this endpoint.\n\nThis will submit the work unit to the target node through the receptor mesh and wait for it to finish.\nThe model will be updated with the result.\nUp-to-date values of the fields will be returned in the response data.", + "operationId": "api_instances_health_check_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "capacity": 100, + "cpu": "6.0", + "cpu_capacity": 6, + "errors": "", + "hostname": "example-host", + "ip_address": "", + "last_health_check": null, + "mem_capacity": 42, + "memory": 36000000000, + "uuid": "00000000-0000-0000-0000-000000000000", + "version": "" + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InstanceHealthCheck" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Health checks are used to obtain important data about an instance.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "Instance fields affected by the health check are shown in this 
view.\nFundamentally, health checks require running code on the machine in question.\n\n - For instances with `node_type` of \"control\" or \"hybrid\", health checks are\nperformed as part of a periodic task that runs in the background.\n - For instances with `node_type` of \"execution\", health checks are done by submitting\na work unit through the receptor mesh.\n\nIf run through the receptor mesh, the invoked command is:\n\n```\nansible-runner worker --worker-info\n```\n\nFor execution nodes, these checks are _not_ performed on a regular basis.\nHealth checks against functional nodes will be run when the node is first discovered.\nHealth checks against nodes with errors will be repeated at a reduced frequency.\n\n\n\n\n# Manually Initiate a Health Check\nFor purposes of error remediation or debugging, a health check can be\nmanually initiated by making a POST request to this endpoint.\n\nThis will submit the work unit to the target node through the receptor mesh and wait for it to finish.\nThe model will be updated with the result.\nUp-to-date values of the fields will be returned in the response data.", + "operationId": "api_instances_health_check_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/InstanceHealthCheck" + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "examples": { + "application/json": { + "msg": "Health check is running for example-host." + } + } + }, + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/InstanceHealthCheck" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Health checks are used to obtain important data about an instance.", + "tags": [ + "api" + ] + } + }, + "/api/v2/instances/{id}/install_bundle/": { + "get": { + "description": "", + "operationId": "api_instances_install_bundle_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Instance" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "No Description for get on /api/{version}/instances/{id}/install_bundle/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/instances/{id}/instance_groups/": { + "get": { + "description": "instance groups associated with the selected\ninstance.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. 
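Since the description above covers both the automatic health checks and the manually initiated variety, a short sketch of the manual POST may help; the host and `$TOKEN` are hypothetical placeholders:

```
# Hypothetical host and token; POST asks the controller to run a health check
# on instance 3 (for execution nodes this goes out as a receptor work unit).
curl -s -X POST \
  -H "Authorization: Bearer $TOKEN" \
  https://controller.example.com/api/v2/instances/3/health_check/
# The spec's own 200 example: {"msg": "Health check is running for example-host."}
```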
The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_instances_instance_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InstanceGroup" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "instance groups associated with the selected\ninstance.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. 
(boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_instances_instance_groups_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "disassociate": true, + "id": 1 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + }, + "400": { + "examples": { + "application/json": { + "msg": "Cannot disassociate hybrid instance hybrid_node from controlplane." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/instances/{id}/jobs/": { + "get": { + "description": "unified jobs associated with the selected\ninstance.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of unified jobs\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more unified job records. \n\n## Results\n\nEach unified job data structure includes the following fields:\n\n* `id`: Database ID for this unified job. (integer)\n* `type`: Data type for this unified job. (choice)\n* `url`: URL for this unified job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this unified job was created. (datetime)\n* `modified`: Timestamp when this unified job was last modified. (datetime)\n* `name`: Name of this unified job. (string)\n* `description`: Optional description of this unified job. 
(string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n\n\n\n## Sorting\n\nTo specify that unified jobs are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_instances_jobs_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/UnifiedJobList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/instances/{id}/peers/": { + "get": { + "description": "instances 
associated with the selected\ninstance.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instances\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance records. \n\n## Results\n\nEach instance data structure includes the following fields:\n\n* `id`: Database ID for this instance. (integer)\n* `hostname`: (string)\n* `type`: Data type for this instance. (choice)\n* `url`: URL for this instance. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `uuid`: (string)\n* `created`: Timestamp when this instance was created. (datetime)\n* `modified`: Timestamp when this instance was last modified. (datetime)\n* `last_seen`: Last time instance ran its heartbeat task for main cluster nodes. Last known connection to receptor mesh for execution nodes. (datetime)\n* `health_check_started`: The last time a health check was initiated on this instance. (datetime)\n* `health_check_pending`: (field)\n* `last_health_check`: Last time a health check was run on this instance to refresh cpu, memory, and capacity. (datetime)\n* `errors`: Any error details from the last health check. (string)\n* `capacity_adjustment`: (decimal)\n* `version`: (string)\n* `capacity`: (integer)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: Count of jobs in the running or waiting state that are targeted for this instance (integer)\n* `jobs_total`: Count of all jobs that target this instance (integer)\n* `cpu`: (decimal)\n* `memory`: Total system memory of this instance in bytes. (integer)\n* `cpu_capacity`: (integer)\n* `mem_capacity`: (integer)\n* `enabled`: (boolean)\n* `managed_by_policy`: (boolean)\n* `node_type`: Role that this node plays in the mesh. (choice)\n - `control`: Control plane node\n - `execution`: Execution plane node\n - `hybrid`: Controller and execution\n - `hop`: Message-passing node, no execution capability\n* `node_state`: Indicates the current life cycle stage of this instance. (choice)\n - `provisioning`: Provisioning\n - `provision-fail`: Provisioning Failure\n - `installed`: Installed\n - `ready`: Ready\n - `unavailable`: Unavailable\n - `deprovisioning`: De-provisioning\n - `deprovision-fail`: De-provisioning Failure\n* `ip_address`: (string)\n* `listener_port`: Port that Receptor will listen for incoming connections on. (integer)\n* `peers`: (field)\n* `peers_from_control_nodes`: If True, control plane cluster nodes should automatically peer to it. (boolean)\n\n\n\n## Sorting\n\nTo specify that instances are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
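As a usage sketch for the listing endpoints described here, the documented `order_by` and `page_size` query parameters combine in the obvious way. The host, token, and the choice of `finished` as a sort field are assumptions for illustration:

```
# Hypothetical host/token; unified jobs that ran on instance 3, most recently finished first.
curl -s -H "Authorization: Bearer $TOKEN" \
  "https://controller.example.com/api/v2/instances/3/jobs/?order_by=-finished&page_size=50"
```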
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_instances_peers_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Instance" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventories/": { + "get": { + "description": "inventories.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventories\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory records. \n\n## Results\n\nEach inventory data structure includes the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string)\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. 
(boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n## Sorting\n\nTo specify that inventories are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Inventory" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "inventories.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n 
}\n\nThe `count` field indicates the total number of inventories\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory records. \n\n## Results\n\nEach inventory data structure includes the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string)\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n## Sorting\n\nTo specify that inventories are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
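The POST body for this endpoint is easiest to see as a concrete request; the payload below mirrors the spec's own smart-inventory example, while the host and `$TOKEN` are hypothetical placeholders:

```
# Hypothetical host/token; creates a smart inventory whose membership is driven
# by host_filter, matching the example body embedded in the spec.
curl -s -X POST \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name": "smart inventory", "kind": "smart", "organization": 1, "host_filter": "ansible_facts__ansible_distribution__exact=\"CentOS\""}' \
  https://controller.example.com/api/v2/inventories/
```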
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "host_filter": "ansible_facts__ansible_distribution__exact=\"CentOS\"", + "kind": "smart", + "name": "smart inventory", + "organization": 1 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "host_filter": "ansible_facts__ansible_distribution__exact=\"CentOS\"", + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "smart", + "modified": "2018-02-01T08:00:00.000000Z", + "name": "smart inventory", + "organization": 1, + "pending_deletion": false, + "prevent_instance_group_fallback": false, + "related": { + "access_list": "/api/v2/inventories/1/access_list/", + "activity_stream": "/api/v2/inventories/1/activity_stream/", + "ad_hoc_commands": "/api/v2/inventories/1/ad_hoc_commands/", + "copy": "/api/v2/inventories/1/copy/", + "hosts": "/api/v2/inventories/1/hosts/", + "instance_groups": "/api/v2/inventories/1/instance_groups/", + "job_templates": "/api/v2/inventories/1/job_templates/", + "labels": "/api/v2/inventories/1/labels/", + "object_roles": "/api/v2/inventories/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "script": "/api/v2/inventories/1/script/", + "variable_data": "/api/v2/inventories/1/variable_data/" + }, + "summary_fields": { + "labels": { + "count": 0, + "results": [] + }, + "object_roles": { + "adhoc_role": { + "description": "May run ad hoc commands on the inventory", + "id": 18, + "name": "Ad Hoc" + }, + "admin_role": { + "description": "Can manage all aspects of the inventory", + "id": 16, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the inventory", + "id": 20, + "name": "Read" + }, + "update_role": { + "description": "May update the inventory", + "id": 17, + "name": "Update" + }, + "use_role": { + "description": "Can use the inventory in a job template", + "id": 19, + "name": "Use" + } + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "user_capabilities": { + "adhoc": true, + "copy": true, + "delete": true, + "edit": true + } + }, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0, + "type": "inventory", + "url": "/api/v2/inventories/1/", + "variables": "" + } + }, + "schema": { + "$ref": "#/definitions/Inventory" + } + }, + "400": { + "examples": { + "application/json": { + "host_filter": { + "host_filter": [ + "ansible_facts does not support searching with __has_keys" + ] + } + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventories/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory. 
(integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string)\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n\n\n# Update an Inventory:\n\nMake a PUT or PATCH request to this resource to update this\ninventory. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this inventory. (string, required)\n* `description`: Optional description of this inventory. (string, default=`\"\"`)\n* `organization`: Organization containing this inventory. (id, required)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory. (default)\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string, default=`\"\"`)\n* `variables`: Inventory variables in JSON or YAML format. 
(json, default=``)\n\n\n\n\n\n\n\n\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean, default=`False`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Inventory:\n\nMake a DELETE request to this resource to delete this inventory.", + "operationId": "api_inventories_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "400": { + "examples": { + "application/json": { + "error": "Inventory is already pending deletion." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single inventory", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string)\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. 
(boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n\n\n# Update an Inventory:\n\nMake a PUT or PATCH request to this resource to update this\ninventory. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this inventory. (string, required)\n* `description`: Optional description of this inventory. (string, default=`\"\"`)\n* `organization`: Organization containing this inventory. (id, required)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory. (default)\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string, default=`\"\"`)\n* `variables`: Inventory variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\n\n\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean, default=`False`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Inventory:\n\nMake a DELETE request to this resource to delete this inventory.", + "operationId": "api_inventories_read", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "host_filter": null, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "modified": "2018-02-01T08:00:00.000000Z", + "name": "test-inv", + "organization": 1, + "pending_deletion": true, + "prevent_instance_group_fallback": false, + "related": { + "access_list": "/api/v2/inventories/1/access_list/", + "activity_stream": "/api/v2/inventories/1/activity_stream/", + "ad_hoc_commands": "/api/v2/inventories/1/ad_hoc_commands/", + "copy": "/api/v2/inventories/1/copy/", + "groups": "/api/v2/inventories/1/groups/", + "hosts": "/api/v2/inventories/1/hosts/", + "instance_groups": "/api/v2/inventories/1/instance_groups/", + "inventory_sources": "/api/v2/inventories/1/inventory_sources/", + "job_templates": "/api/v2/inventories/1/job_templates/", + "labels": "/api/v2/inventories/1/labels/", + "object_roles": "/api/v2/inventories/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "root_groups": "/api/v2/inventories/1/root_groups/", + "script": "/api/v2/inventories/1/script/", + "tree": "/api/v2/inventories/1/tree/", + "update_inventory_sources": "/api/v2/inventories/1/update_inventory_sources/", + "variable_data": "/api/v2/inventories/1/variable_data/" + }, + "summary_fields": { + "labels": { + "count": 0, + "results": [] + }, + "object_roles": { + "adhoc_role": { + "description": "May run ad hoc commands on the inventory", + "id": 18, + "name": "Ad Hoc" + }, + "admin_role": { + "description": "Can 
manage all aspects of the inventory", + "id": 16, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the inventory", + "id": 20, + "name": "Read" + }, + "update_role": { + "description": "May update the inventory", + "id": 17, + "name": "Update" + }, + "use_role": { + "description": "Can use the inventory in a job template", + "id": 19, + "name": "Use" + } + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "user_capabilities": { + "adhoc": true, + "copy": false, + "delete": true, + "edit": true + } + }, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0, + "type": "inventory", + "url": "/api/v2/inventories/1/", + "variables": "" + } + }, + "schema": { + "$ref": "#/definitions/Inventory" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single inventory", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string)\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. 
(boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n\n\n# Update an Inventory:\n\nMake a PUT or PATCH request to this resource to update this\ninventory. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this inventory. (string, required)\n* `description`: Optional description of this inventory. (string, default=`\"\"`)\n* `organization`: Organization containing this inventory. (id, required)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory. (default)\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string, default=`\"\"`)\n* `variables`: Inventory variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\n\n\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean, default=`False`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Inventory:\n\nMake a DELETE request to this resource to delete this inventory.", + "operationId": "api_inventories_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Inventory" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Inventory" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single inventory", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string)\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. 
(boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n\n\n# Update an Inventory:\n\nMake a PUT or PATCH request to this resource to update this\ninventory. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this inventory. (string, required)\n* `description`: Optional description of this inventory. (string, default=`\"\"`)\n* `organization`: Organization containing this inventory. (id, required)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory. (default)\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string, default=`\"\"`)\n* `variables`: Inventory variables in JSON or YAML format. (json, default=``)\n\n\n\n\n\n\n\n\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. 
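A sketch of the PUT variant described here, using the same body as the spec's 200 example for this operation; `name` and `organization` are the required fields, and the host and `$TOKEN` are hypothetical placeholders:

```
# Hypothetical host/token; PUT replaces the record, so send all modifiable fields.
curl -s -X PUT \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name": "New name", "description": "Hello world", "organization": 1}' \
  https://controller.example.com/api/v2/inventories/1/
```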
(boolean, default=`False`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Inventory:\n\nMake a DELETE request to this resource to delete this inventory.", + "operationId": "api_inventories_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "description": "Hello world", + "name": "New name", + "organization": 1 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "Hello world", + "has_active_failures": false, + "has_inventory_sources": false, + "host_filter": null, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "modified": "2018-02-01T08:00:00.000000Z", + "name": "New name", + "organization": 1, + "pending_deletion": false, + "prevent_instance_group_fallback": false, + "related": { + "access_list": "/api/v2/inventories/1/access_list/", + "activity_stream": "/api/v2/inventories/1/activity_stream/", + "ad_hoc_commands": "/api/v2/inventories/1/ad_hoc_commands/", + "copy": "/api/v2/inventories/1/copy/", + "groups": "/api/v2/inventories/1/groups/", + "hosts": "/api/v2/inventories/1/hosts/", + "instance_groups": "/api/v2/inventories/1/instance_groups/", + "inventory_sources": "/api/v2/inventories/1/inventory_sources/", + "job_templates": "/api/v2/inventories/1/job_templates/", + "labels": "/api/v2/inventories/1/labels/", + "object_roles": "/api/v2/inventories/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "root_groups": "/api/v2/inventories/1/root_groups/", + "script": "/api/v2/inventories/1/script/", + "tree": "/api/v2/inventories/1/tree/", + "update_inventory_sources": "/api/v2/inventories/1/update_inventory_sources/", + "variable_data": "/api/v2/inventories/1/variable_data/" + }, + "summary_fields": { + "labels": { + "count": 0, + "results": [] + }, + "object_roles": { + "adhoc_role": { + "description": "May run ad hoc commands on the inventory", + "id": 18, + "name": "Ad Hoc" + }, + "admin_role": { + "description": "Can manage all aspects of the inventory", + "id": 16, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the inventory", + "id": 20, + "name": "Read" + }, + "update_role": { + "description": "May update the inventory", + "id": 17, + "name": "Update" + }, + "use_role": { + "description": "Can use the inventory in a job template", + "id": 19, + "name": "Use" + } + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "user_capabilities": { + "adhoc": true, + "copy": false, + "delete": true, + "edit": true + } + }, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0, + "type": "inventory", + "url": "/api/v2/inventories/1/", + "variables": "" + } + }, + "schema": { + "$ref": "#/definitions/Inventory" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." 
+ } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single inventory", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventories/{id}/access_list/": { + "get": { + "description": "users.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_access_list_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ResourceAccessListElement" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventories/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. 
For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventories/{id}/ad_hoc_commands/": { + "get": { + "description": "ad hoc commands associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of ad hoc commands\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more ad hoc command records. \n\n## Results\n\nEach ad hoc command data structure includes the following fields:\n\n* `id`: Database ID for this ad hoc command. (integer)\n* `type`: Data type for this ad hoc command. (choice)\n* `url`: URL for this ad hoc command. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this ad hoc command was created. (datetime)\n* `modified`: Timestamp when this ad hoc command was last modified. (datetime)\n* `name`: Name of this ad hoc command. (string)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `limit`: (string)\n* `credential`: (id)\n* `module_name`: (choice)\n - `command`\n - `shell`\n - `yum`\n - `apt`\n - `apt_key`\n - `apt_repository`\n - `apt_rpm`\n - `service`\n - `group`\n - `user`\n - `mount`\n - `ping`\n - `selinux`\n - `setup`\n - `win_ping`\n - `win_service`\n - `win_updates`\n - `win_group`\n - `win_user`\n* `module_args`: (string)\n* `forks`: (integer)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (string)\n* `become_enabled`: (boolean)\n* `diff_mode`: (boolean)\n\n\n\n## Sorting\n\nTo specify that ad hoc commands are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_ad_hoc_commands_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/AdHocCommandList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "ad hoc commands associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of ad hoc commands\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more ad hoc command records. \n\n## Results\n\nEach ad hoc command data structure includes the following fields:\n\n* `id`: Database ID for this ad hoc command. (integer)\n* `type`: Data type for this ad hoc command. (choice)\n* `url`: URL for this ad hoc command. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this ad hoc command was created. (datetime)\n* `modified`: Timestamp when this ad hoc command was last modified. (datetime)\n* `name`: Name of this ad hoc command. (string)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. 
(datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `limit`: (string)\n* `credential`: (id)\n* `module_name`: (choice)\n - `command`\n - `shell`\n - `yum`\n - `apt`\n - `apt_key`\n - `apt_repository`\n - `apt_rpm`\n - `service`\n - `group`\n - `user`\n - `mount`\n - `ping`\n - `selinux`\n - `setup`\n - `win_ping`\n - `win_service`\n - `win_updates`\n - `win_group`\n - `win_user`\n* `module_args`: (string)\n* `forks`: (integer)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (string)\n* `become_enabled`: (boolean)\n* `diff_mode`: (boolean)\n\n\n\n## Sorting\n\nTo specify that ad hoc commands are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_ad_hoc_commands_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "credential": 1, + "inventory": 1, + "module_args": "uptime", + "module_name": "command" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "become_enabled": false, + "canceled_on": null, + "controller_node": "", + "created": "2018-02-01T08:00:00.000000Z", + "credential": 1, + "diff_mode": false, + "elapsed": 0.0, + "execution_environment": null, + "execution_node": "", + "extra_vars": "", + "failed": false, + "finished": null, + "forks": 0, + "id": 2, + "inventory": 1, + "job_explanation": "", + "job_type": "run", + "launch_type": "manual", + "launched_by": {}, + "limit": "", + "modified": "2018-02-01T08:00:00.000000Z", + "module_args": "uptime", + "module_name": "command", + "name": "command", + "related": { + "activity_stream": "/api/v2/ad_hoc_commands/2/activity_stream/", + "cancel": "/api/v2/ad_hoc_commands/2/cancel/", + "credential": "/api/v2/credentials/1/", + "events": "/api/v2/ad_hoc_commands/2/events/", + "inventory": "/api/v2/inventories/1/", + "notifications": "/api/v2/ad_hoc_commands/2/notifications/", + "relaunch": "/api/v2/ad_hoc_commands/2/relaunch/", + "stdout": "/api/v2/ad_hoc_commands/2/stdout/" + }, + "started": null, + "status": "new", + "summary_fields": { + "credential": { + "cloud": false, + "credential_type_id": 1, + "description": "", + "id": 1, + "kind": "ssh", + "kubernetes": false, + "name": "machine-cred" + }, + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "user_capabilities": { + "delete": true, + "start": true + } + }, + "type": "ad_hoc_command", + "url": "/api/v2/ad_hoc_commands/2/", + "verbosity": 0, + "work_unit_id": null + } + }, + "schema": { + "$ref": "#/definitions/AdHocCommandList" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventories/{id}/copy/": { + "get": { + "description": "", + "operationId": "api_inventories_copy_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + 
"format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Copy" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "No Description for get on /api/{version}/inventories/{id}/copy/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_inventories_copy_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Copy" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Copy" + } + } + }, + "summary": "No Description for post on /api/{version}/inventories/{id}/copy/", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventories/{id}/groups/": { + "get": { + "description": "groups associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 0, + "next": null, + "previous": null, + "results": [] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Group" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "groups associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. 
(json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_groups_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "description": "Hello world", + "name": "New name" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "Hello world", + "id": 1, + "inventory": 1, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "New name", + "related": { + "activity_stream": "/api/v2/groups/1/activity_stream/", + "ad_hoc_commands": "/api/v2/groups/1/ad_hoc_commands/", + "all_hosts": "/api/v2/groups/1/all_hosts/", + "children": "/api/v2/groups/1/children/", + "hosts": "/api/v2/groups/1/hosts/", + "inventory": "/api/v2/inventories/1/", + "inventory_sources": "/api/v2/groups/1/inventory_sources/", + "job_events": "/api/v2/groups/1/job_events/", + "job_host_summaries": "/api/v2/groups/1/job_host_summaries/", + "potential_children": "/api/v2/groups/1/potential_children/", + "variable_data": "/api/v2/groups/1/variable_data/" + }, + "summary_fields": { + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true + } + }, + "type": "group", + "url": "/api/v2/groups/1/", + "variables": "" + } + }, + "schema": { + "$ref": "#/definitions/Group" + } + }, + "400": { + "examples": { + "application/json": { + "inventory": { + "detail": "Cannot create Group for Smart or Constructed Inventories" + } + } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventories/{id}/hosts/": { + "get": { + "description": "hosts associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of hosts\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. 
The\n`results` list contains zero or more host records. \n\n## Results\n\nEach host data structure includes the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. (string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. (datetime)\n\n\n\n## Sorting\n\nTo specify that hosts are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_hosts_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 3, + "next": null, + "previous": null, + "results": [ + { + "ansible_facts_modified": null, + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "enabled": true, + "has_active_failures": false, + "has_inventory_sources": true, + "id": 1, + "instance_id": "", + "inventory": 1, + "last_job": null, + "last_job_host_summary": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "1", + "related": { + "activity_stream": "/api/v2/hosts/1/activity_stream/", + "ad_hoc_command_events": "/api/v2/hosts/1/ad_hoc_command_events/", + "ad_hoc_commands": "/api/v2/hosts/1/ad_hoc_commands/", + "all_groups": "/api/v2/hosts/1/all_groups/", + "ansible_facts": "/api/v2/hosts/1/ansible_facts/", + "groups": "/api/v2/hosts/1/groups/", + 
"inventory": "/api/v2/inventories/1/", + "inventory_sources": "/api/v2/hosts/1/inventory_sources/", + "job_events": "/api/v2/hosts/1/job_events/", + "job_host_summaries": "/api/v2/hosts/1/job_host_summaries/", + "smart_inventories": "/api/v2/hosts/1/smart_inventories/", + "variable_data": "/api/v2/hosts/1/variable_data/" + }, + "summary_fields": { + "groups": { + "count": 0, + "results": [] + }, + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": true, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 1 + }, + "recent_jobs": [], + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "host", + "url": "/api/v2/hosts/1/", + "variables": "" + }, + { + "ansible_facts_modified": null, + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "enabled": true, + "has_active_failures": false, + "has_inventory_sources": true, + "id": 2, + "instance_id": "", + "inventory": 1, + "last_job": null, + "last_job_host_summary": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "2", + "related": { + "activity_stream": "/api/v2/hosts/2/activity_stream/", + "ad_hoc_command_events": "/api/v2/hosts/2/ad_hoc_command_events/", + "ad_hoc_commands": "/api/v2/hosts/2/ad_hoc_commands/", + "all_groups": "/api/v2/hosts/2/all_groups/", + "ansible_facts": "/api/v2/hosts/2/ansible_facts/", + "groups": "/api/v2/hosts/2/groups/", + "inventory": "/api/v2/inventories/1/", + "inventory_sources": "/api/v2/hosts/2/inventory_sources/", + "job_events": "/api/v2/hosts/2/job_events/", + "job_host_summaries": "/api/v2/hosts/2/job_host_summaries/", + "smart_inventories": "/api/v2/hosts/2/smart_inventories/", + "variable_data": "/api/v2/hosts/2/variable_data/" + }, + "summary_fields": { + "groups": { + "count": 0, + "results": [] + }, + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": true, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 1 + }, + "recent_jobs": [], + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "host", + "url": "/api/v2/hosts/2/", + "variables": "" + }, + { + "ansible_facts_modified": null, + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "enabled": true, + "has_active_failures": false, + "has_inventory_sources": true, + "id": 3, + "instance_id": "", + "inventory": 1, + "last_job": null, + "last_job_host_summary": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "3", + "related": { + "activity_stream": "/api/v2/hosts/3/activity_stream/", + "ad_hoc_command_events": "/api/v2/hosts/3/ad_hoc_command_events/", + "ad_hoc_commands": "/api/v2/hosts/3/ad_hoc_commands/", + "all_groups": "/api/v2/hosts/3/all_groups/", + "ansible_facts": "/api/v2/hosts/3/ansible_facts/", + "groups": "/api/v2/hosts/3/groups/", + "inventory": "/api/v2/inventories/1/", + "inventory_sources": "/api/v2/hosts/3/inventory_sources/", + "job_events": "/api/v2/hosts/3/job_events/", + "job_host_summaries": "/api/v2/hosts/3/job_host_summaries/", + "smart_inventories": "/api/v2/hosts/3/smart_inventories/", + "variable_data": "/api/v2/hosts/3/variable_data/" + }, + "summary_fields": { + "groups": { + "count": 0, + "results": [] + }, + 
"inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": true, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 1 + }, + "recent_jobs": [], + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "host", + "url": "/api/v2/hosts/3/", + "variables": "" + } + ] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Host" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "hosts associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of hosts\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more host records. \n\n## Results\n\nEach host data structure includes the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. (string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. (datetime)\n\n\n\n## Sorting\n\nTo specify that hosts are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_hosts_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "description": "Hello world", + "name": "New name" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "ansible_facts_modified": null, + "created": "2018-02-01T08:00:00.000000Z", + "description": "Hello world", + "enabled": true, + "has_active_failures": false, + "has_inventory_sources": false, + "id": 4, + "instance_id": "", + "inventory": 1, + "last_job": null, + "last_job_host_summary": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "New name", + "related": { + "activity_stream": "/api/v2/hosts/4/activity_stream/", + "ad_hoc_command_events": "/api/v2/hosts/4/ad_hoc_command_events/", + "ad_hoc_commands": "/api/v2/hosts/4/ad_hoc_commands/", + "all_groups": "/api/v2/hosts/4/all_groups/", + "ansible_facts": "/api/v2/hosts/4/ansible_facts/", + "groups": "/api/v2/hosts/4/groups/", + "inventory": "/api/v2/inventories/1/", + "inventory_sources": "/api/v2/hosts/4/inventory_sources/", + "job_events": "/api/v2/hosts/4/job_events/", + "job_host_summaries": "/api/v2/hosts/4/job_host_summaries/", + "smart_inventories": "/api/v2/hosts/4/smart_inventories/", + "variable_data": "/api/v2/hosts/4/variable_data/" + }, + "summary_fields": { + "groups": { + "count": 0, + "results": [] + }, + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "recent_jobs": [], + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "host", + "url": "/api/v2/hosts/4/", + "variables": "" + } + }, + "schema": { + "$ref": "#/definitions/Host" + } + }, + "400": { + "examples": { + "application/json": { + "__all__": [ + "A Group with that name already exists." + ] + } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventories/{id}/input_inventories/": { + "get": { + "description": "inventories associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventories\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory records. \n\n## Results\n\nEach inventory data structure includes the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. 
(choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string)\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instances groups to run associated job templates on.If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n## Sorting\n\nTo specify that inventories are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_input_inventories_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Inventory" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "inventories associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventories\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory records. \n\n## Results\n\nEach inventory data structure includes the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. (string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string)\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. 
Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instances groups to run associated job templates on.If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n## Sorting\n\nTo specify that inventories are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_input_inventories_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Inventory" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Inventory" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventories/{id}/instance_groups/": { + "get": { + "description": "instance groups associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. 
(string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assign to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_instance_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 5, + "next": null, + "previous": null, + "results": [ + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 5, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-0", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/5/access_list/", + "instances": "/api/v2/instance_groups/5/instances/", + "jobs": "/api/v2/instance_groups/5/jobs/", + "object_roles": "/api/v2/instance_groups/5/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 33, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 35, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 34, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/5/" + }, + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 4, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-4", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/4/access_list/", + "instances": "/api/v2/instance_groups/4/instances/", + "jobs": "/api/v2/instance_groups/4/jobs/", + "object_roles": "/api/v2/instance_groups/4/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 30, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 32, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 31, + "name": "Use" + } + }, + 
"user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/4/" + }, + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 3, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-1", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/3/access_list/", + "instances": "/api/v2/instance_groups/3/instances/", + "jobs": "/api/v2/instance_groups/3/jobs/", + "object_roles": "/api/v2/instance_groups/3/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 27, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 29, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 28, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/3/" + }, + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 1, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-3", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/1/access_list/", + "instances": "/api/v2/instance_groups/1/instances/", + "jobs": "/api/v2/instance_groups/1/jobs/", + "object_roles": "/api/v2/instance_groups/1/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 21, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 23, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 22, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/1/" + }, + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 2, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-2", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/2/access_list/", + "instances": "/api/v2/instance_groups/2/instances/", + "jobs": "/api/v2/instance_groups/2/jobs/", + "object_roles": "/api/v2/instance_groups/2/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 24, + "name": "Admin" + }, + "read_role": { + 
"description": "May view settings for the instance group", + "id": 26, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 25, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/2/" + } + ] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InstanceGroup" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "instance groups associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assign to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_instance_groups_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "associate": true, + "id": 2 + } + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventories/{id}/inventory_sources/": { + "get": { + "description": "inventory sources associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventory sources\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory source records. \n\n## Results\n\nEach inventory source data structure includes the following fields:\n\n* `id`: Database ID for this inventory source. (integer)\n* `type`: Data type for this inventory source. (choice)\n* `url`: URL for this inventory source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory source was created. (datetime)\n* `modified`: Timestamp when this inventory source was last modified. (datetime)\n* `name`: Name of this inventory source. (string)\n* `description`: Optional description of this inventory source. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. 
(integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example, if enabled_var="status.power_state" and enabled_value="powered_on" with host variables: { "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1" }, the host would be marked enabled. If power_state were any value other than powered_on, the host would be disabled when imported. If the key is not found, the host will be enabled. (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use. (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `none`: No External Source\n* `execution_environment`: The container image to be used for execution. (id)\n* `inventory`: (id)\n* `update_on_launch`: (boolean)\n* `update_cache_timeout`: (integer)\n* `source_project`: Project containing inventory file used as source. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that inventory sources are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request.
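The `enabled_var`/`enabled_value` behavior documented above is easiest to see in code. A minimal sketch of the described semantics, not the controller's actual implementation (the helper names are hypothetical):

```python
# Hypothetical helpers restating the documented enabled_var/enabled_value rules.
_MISSING = object()

def lookup(hostvars: dict, dotted: str, default=_MISSING):
    """Traverse nested dicts: "status.power_state" -> hostvars["status"]["power_state"]."""
    current = hostvars
    for key in dotted.split("."):
        if not isinstance(current, dict) or key not in current:
            return default
        current = current[key]
    return current

def host_enabled(hostvars: dict, enabled_var: str, enabled_value: str) -> bool:
    value = lookup(hostvars, enabled_var)
    if value is _MISSING:  # key not found: the host stays enabled
        return True
    return str(value) == enabled_value  # any other value disables the host on import

hostvars = {"status": {"power_state": "powered_on"}, "name": "foobar"}
assert host_enabled(hostvars, "status.power_state", "powered_on")
assert not host_enabled({"status": {"power_state": "off"}}, "status.power_state", "powered_on")
assert host_enabled({"name": "foobar"}, "status.power_state", "powered_on")  # missing key
```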
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_inventory_sources_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 0, + "next": null, + "previous": null, + "results": [] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InventorySource" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "inventory sources associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventory sources\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory source records. \n\n## Results\n\nEach inventory source data structure includes the following fields:\n\n* `id`: Database ID for this inventory source. (integer)\n* `type`: Data type for this inventory source. (choice)\n* `url`: URL for this inventory source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory source was created. (datetime)\n* `modified`: Timestamp when this inventory source was last modified. (datetime)\n* `name`: Name of this inventory source. (string)\n* `description`: Optional description of this inventory source. 
(string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example, if enabled_var="status.power_state" and enabled_value="powered_on" with host variables: { "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1" }, the host would be marked enabled. If power_state were any value other than powered_on, the host would be disabled when imported. If the key is not found, the host will be enabled. (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use. (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `none`: No External Source\n* `execution_environment`: The container image to be used for execution. (id)\n* `inventory`: (id)\n* `update_on_launch`: (boolean)\n* `update_cache_timeout`: (integer)\n* `source_project`: Project containing inventory file used as source. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that inventory sources are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request.
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_inventory_sources_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "name": "ec2-inv-source", + "source": "ec2" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "custom_virtualenv": null, + "description": "", + "enabled_value": "", + "enabled_var": "", + "execution_environment": null, + "host_filter": "", + "id": 1, + "inventory": 1, + "last_job_failed": false, + "last_job_run": null, + "last_update_failed": false, + "last_updated": null, + "limit": "", + "modified": "2018-02-01T08:00:00.000000Z", + "name": "ec2-inv-source", + "next_job_run": null, + "overwrite": false, + "overwrite_vars": false, + "related": { + "activity_stream": "/api/v2/inventory_sources/1/activity_stream/", + "credentials": "/api/v2/inventory_sources/1/credentials/", + "groups": "/api/v2/inventory_sources/1/groups/", + "hosts": "/api/v2/inventory_sources/1/hosts/", + "inventory": "/api/v2/inventories/1/", + "inventory_updates": "/api/v2/inventory_sources/1/inventory_updates/", + "notification_templates_error": "/api/v2/inventory_sources/1/notification_templates_error/", + "notification_templates_started": "/api/v2/inventory_sources/1/notification_templates_started/", + "notification_templates_success": "/api/v2/inventory_sources/1/notification_templates_success/", + "schedules": "/api/v2/inventory_sources/1/schedules/", + "update": "/api/v2/inventory_sources/1/update/" + }, + "scm_branch": "", + "source": "ec2", + "source_path": "", + "source_project": null, + "source_vars": "", + "status": "never updated", + "summary_fields": { + "credentials": [], + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": true, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 1 + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "user_capabilities": { + "delete": true, + "edit": true, + "schedule": true, + "start": true + } + }, + "timeout": 0, + "type": "inventory_source", + "update_cache_timeout": 0, + "update_on_launch": false, + "url": "/api/v2/inventory_sources/1/", + "verbosity": 1 + } + }, + "schema": { + "$ref": "#/definitions/InventorySource" + } + }, + "400": { + "examples": { + "application/json": { + "inventory": { + "detail": "Cannot create Inventory Source for Smart or Constructed Inventories" + }, + "source": [ + "\"\" is not a valid choice." + ] + } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." 
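The example request body and the 201/400/403 responses above translate directly into a client call. A hedged sketch (host, token, and inventory id are placeholders; the payload is the example from the spec):

```python
# Sketch of the documented POST to create an inventory source.
import requests

resp = requests.post(
    "https://controller.example.com/api/v2/inventories/1/inventory_sources/",
    headers={"Authorization": "Bearer <token>"},
    json={"name": "ec2-inv-source", "source": "ec2"},  # example body from the spec
)
if resp.status_code == 201:
    src = resp.json()
    print("created", src["url"], "status:", src["status"])  # "never updated" until first sync
elif resp.status_code in (400, 403):
    print("rejected:", resp.json())  # validation or permission error, as in the examples above
```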
+ } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventories/{id}/job_templates/": { + "get": { + "description": "job templates associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of job templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job template records. \n\n## Results\n\nEach job template data structure includes the following fields:\n\n* `id`: Database ID for this job template. (integer)\n* `type`: Data type for this job template. (choice)\n* `url`: URL for this job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job template was created. (datetime)\n* `modified`: Timestamp when this job template was last modified. (datetime)\n* `name`: Name of this job template. (string)\n* `description`: Optional description of this job template. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this template. (id)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n* `execution_environment`: The container image to be used for execution. 
(id)\n* `host_config_key`: (string)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_diff_mode_on_launch`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_job_type_on_launch`: (boolean)\n* `ask_verbosity_on_launch`: (boolean)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_credential_on_launch`: (boolean)\n* `ask_execution_environment_on_launch`: (boolean)\n* `ask_labels_on_launch`: (boolean)\n* `ask_forks_on_launch`: (boolean)\n* `ask_job_slice_count_on_launch`: (boolean)\n* `ask_timeout_on_launch`: (boolean)\n* `ask_instance_groups_on_launch`: (boolean)\n* `survey_enabled`: (boolean)\n* `become_enabled`: (boolean)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output. (boolean)\n* `allow_simultaneous`: (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use. (string)\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer)\n* `webhook_service`: Service that webhook requests will be accepted from. (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API. (id)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instance groups to run on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. (boolean)\n\n\n\n## Sorting\n\nTo specify that job templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request.
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_job_templates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/JobTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventories/{id}/labels/": { + "get": { + "description": "labels associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_labels_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Label" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "labels associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
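For the POST side of this resource, the body follows the `Label` schema referenced below; a small sketch under the assumption that the `name` and `organization` fields listed above are what the body carries (all values are hypothetical):

```python
# Sketch only; the label name and organization id are hypothetical values.
import requests

resp = requests.post(
    "https://controller.example.com/api/v2/inventories/1/labels/",
    headers={"Authorization": "Bearer <token>"},
    json={"name": "production", "organization": 1},  # Label fields from the list above
)
print(resp.status_code)  # 201 with the created Label on success
```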
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_labels_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Label" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Label" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventories/{id}/object_roles/": { + "get": { + "description": "roles associated with the selected\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records. \n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_object_roles_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Role" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventories/{id}/root_groups/": { + "get": { + "description": "groups associated with this\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_root_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Group" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of root (top-level)", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "groups associated with this\ninventory.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventories_root_groups_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Group" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Group" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of root (top-level)", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventories/{id}/script/": { + "get": { + "description": "for more information on inventory scripts.\n\n## List Response\n\nMake a GET request to this resource without query parameters to retrieve a JSON\nobject containing groups, including the hosts, children and variables for each\ngroup. The response data is equivalent to that returned by passing the\n`--list` argument to an inventory script.\n\nSpecify a query string of `?hostvars=1` to retrieve the JSON\nobject above including all host variables. The `['_meta']['hostvars']` object\nin the response contains an entry for each host with its variables. This\nresponse format can be used with Ansible 1.3 and later to avoid making a\nseparate API request for each host. Refer to\n[Tuning the External Inventory Script](http://docs.ansible.com/developing_inventory.html#tuning-the-external-inventory-script)\nfor more information on this feature.\n\nBy default, the inventory script will only return hosts that\nare enabled in the inventory. This feature allows disabled hosts to be skipped\nwhen running jobs without removing them from the inventory. Specify a query\nstring of `?all=1` to return all hosts, including disabled ones.\n\nSpecify a query string of `?towervars=1` to add variables\nto the hostvars of each host that specifies its enabled state and database ID.\n\nSpecify a query string of `?subset=slice2of5` to produce an inventory that\nhas a restricted number of hosts according to the rules of job slicing.\n\nTo apply multiple query strings, join them with the `&` character, like `?hostvars=1&all=1`.\n\n## Host Response\n\nMake a GET request to this resource with a query string similar to\n`?host=HOSTNAME` to retrieve a JSON object containing host variables for the\nspecified host. 
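Those query strings combine with `&` on one request. A brief sketch of the list and host response modes (placeholders as before):

```python
# Sketch of the --list and --host response modes described above.
import requests

base = "https://controller.example.com/api/v2/inventories/1/script/"
headers = {"Authorization": "Bearer <token>"}

# --list equivalent: every host (including disabled ones), with hostvars inlined
inventory = requests.get(
    base, headers=headers, params={"hostvars": 1, "all": 1, "towervars": 1}
).json()
print(list(inventory.get("_meta", {}).get("hostvars", {})))  # per-host variables

# --host HOSTNAME equivalent: variables for a single host
hostvars = requests.get(base, headers=headers, params={"host": "first_host"}).json()
```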
The response data is equivalent to that returned by passing\nthe `--host HOSTNAME` argument to an inventory script.", + "operationId": "api_inventories_script_read", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "all": { + "hosts": [ + "first_host", + "second_host" + ] + } + } + }, + "schema": { + "$ref": "#/definitions/InventoryScript" + } + } + }, + "summary": "Refer to [Dynamic Inventory](http://docs.ansible.com/intro_dynamic_inventory.html)", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventories/{id}/tree/": { + "get": { + "description": "associated with the selected inventory.\n\nThe resulting data structure contains a list of root groups, with each group\nalso containing a list of its children.\n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n* `children`: (field)", + "operationId": "api_inventories_tree_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/GroupTree" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a hierarchical view of groups", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventories/{id}/update_inventory_sources/": { + "get": { + "description": "this inventory can be updated. The response will include the following fields for each\ninventory source:\n\n* `inventory_source`: ID of the inventory_source\n (integer, read-only)\n* `can_update`: Flag indicating if this inventory source can be updated\n (boolean, read-only)\n\nMake a POST request to this resource to update the inventory sources. The response\nstatus code will be a 202. 
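A sketch of the POST flow just described; it assumes the 202 body is a list with one record per inventory source, carrying the fields listed immediately below:

```python
# Sketch of the documented POST; the per-source response shape is an assumption.
import requests

resp = requests.post(
    "https://controller.example.com/api/v2/inventories/1/update_inventory_sources/",
    headers={"Authorization": "Bearer <token>"},
)
print(resp.status_code)  # 202 when the updates are accepted
for entry in resp.json():
    print(entry.get("status"), entry.get("inventory_update"), entry.get("project_update"))
```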
The response will contain the following fields for each of the individual\ninventory sources:\n\n* `status`: `started` or message why the update could not be started.\n (string, read-only)\n* `inventory_update`: ID of the inventory update job that was started.\n (integer, read-only)\n* `project_update`: ID of the project update job that was started if this inventory source is an SCM source.\n (integer, read-only, optional)", + "operationId": "api_inventories_update_inventory_sources_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/InventorySourceUpdate" + } + } + }, + "summary": "Make a GET request to this resource to determine if any of the inventory sources for", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "this inventory can be updated. The response will include the following fields for each\ninventory source:\n\n* `inventory_source`: ID of the inventory_source\n (integer, read-only)\n* `can_update`: Flag indicating if this inventory source can be updated\n (boolean, read-only)\n\nMake a POST request to this resource to update the inventory sources. The response\nstatus code will be a 202. The response will contain the following fields for each of the individual\ninventory sources:\n\n* `status`: `started` or message why the update could not be started.\n (string, read-only)\n* `inventory_update`: ID of the inventory update job that was started.\n (integer, read-only)\n* `project_update`: ID of the project update job that was started if this inventory source is an SCM source.\n (integer, read-only, optional)", + "operationId": "api_inventories_update_inventory_sources_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/InventorySourceUpdate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/InventorySourceUpdate" + } + } + }, + "summary": "Make a GET request to this resource to determine if any of the inventory sources for", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventories/{id}/variable_data/": { + "get": { + "consumes": [ + "application/json", + "application/yaml" + ], + "description": "inventory.\n\n\n\n# Update Inventory Variable Data:\n\nMake a PUT or PATCH request to this resource to update variables defined for an\ninventory.", + "operationId": "api_inventories_variable_data_read", + "parameters": [], + "produces": [ + "application/json", + "application/yaml" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": {} + }, + "schema": { + "$ref": "#/definitions/InventoryVariableData" + } + } + }, + "summary": "Make a GET request to this resource to retrieve all variables defined for an", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "consumes": [ + "application/json", + "application/yaml" + ], + "description": "inventory.\n\n\n\n# Update Inventory Variable Data:\n\nMake a PUT or PATCH request to this resource to update variables defined for an\ninventory.", + "operationId": "api_inventories_variable_data_partial_update", + "parameters": [ + { + "in": "body", + "name": "data",
"schema": { + "example": { + "host_filter": "bar" + } + } + } + ], + "produces": [ + "application/json", + "application/yaml" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "host_filter": "bar" + } + }, + "schema": { + "$ref": "#/definitions/InventoryVariableData" + } + } + }, + "summary": "Make a GET request to this resource to retrieve all variables defined for a", + "tags": [ + "api" + ] + }, + "put": { + "consumes": [ + "application/json", + "application/yaml" + ], + "description": "inventory.\n\n\n\n# Update Inventory Variable Data:\n\nMake a PUT or PATCH request to this resource to update variables defined for a\ninventory.", + "operationId": "api_inventories_variable_data_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "fooooo": "bar" + } + } + } + ], + "produces": [ + "application/json", + "application/yaml" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "fooooo": "bar" + } + }, + "schema": { + "$ref": "#/definitions/InventoryVariableData" + } + } + }, + "summary": "Make a GET request to this resource to retrieve all variables defined for a", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventory_sources/": { + "get": { + "description": "inventory sources.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventory sources\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory source records. \n\n## Results\n\nEach inventory source data structure includes the following fields:\n\n* `id`: Database ID for this inventory source. (integer)\n* `type`: Data type for this inventory source. (choice)\n* `url`: URL for this inventory source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory source was created. (datetime)\n* `modified`: Timestamp when this inventory source was last modified. (datetime)\n* `name`: Name of this inventory source. (string)\n* `description`: Optional description of this inventory source. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. 
The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example, if enabled_var="status.power_state" and enabled_value="powered_on" with host variables: { "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1" }, the host would be marked enabled. If power_state were any value other than powered_on, the host would be disabled when imported. If the key is not found, the host will be enabled. (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use. (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `none`: No External Source\n* `execution_environment`: The container image to be used for execution. (id)\n* `inventory`: (id)\n* `update_on_launch`: (boolean)\n* `update_cache_timeout`: (integer)\n* `source_project`: Project containing inventory file used as source. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that inventory sources are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request.
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_sources_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InventorySource" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "inventory sources.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventory sources\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory source records. \n\n## Results\n\nEach inventory source data structure includes the following fields:\n\n* `id`: Database ID for this inventory source. (integer)\n* `type`: Data type for this inventory source. (choice)\n* `url`: URL for this inventory source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory source was created. (datetime)\n* `modified`: Timestamp when this inventory source was last modified. (datetime)\n* `name`: Name of this inventory source. (string)\n* `description`: Optional description of this inventory source. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. 
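Since every list endpoint in this spec shares the count/next/previous/results envelope and the sorting, pagination, and search parameters described above, a client can treat them uniformly. A hedged sketch (host and token illustrative; `next` is assumed to be relative to the host, as AWX-style APIs return it):

```python
import requests

BASE = "https://controller.example.com"
HEADERS = {"Authorization": "Bearer <token>"}

def iter_results(path):
    """Yield every record from a paginated list endpoint by following `next`."""
    url = f"{BASE}{path}"
    while url:
        page = requests.get(url, headers=HEADERS).json()
        yield from page["results"]
        # `next` is null on the last page; otherwise it carries page/page_size.
        url = f"{BASE}{page['next']}" if page.get("next") else None

for src in iter_results("/api/v2/inventory_sources/?order_by=name&page_size=100"):
    print(src["id"], src["name"])
```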
Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state"and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `none`: No External Source\n* `execution_environment`: The container image to be used for execution. (id)\n* `inventory`: (id)\n* `update_on_launch`: (boolean)\n* `update_cache_timeout`: (integer)\n* `source_project`: Project containing inventory file used as source. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that inventory sources are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_sources_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/InventorySource" + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/InventorySource" + } + }, + "400": { + "examples": { + "application/json": { + "source": [ + "\"\" is not a valid choice." + ], + "source_vars": [ + "`FOOBAR` is a prohibited environment variable" + ] + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventory_sources/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory source. (integer)\n* `type`: Data type for this inventory source. (choice)\n* `url`: URL for this inventory source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory source was created. (datetime)\n* `modified`: Timestamp when this inventory source was last modified. (datetime)\n* `name`: Name of this inventory source. (string)\n* `description`: Optional description of this inventory source. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state"and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. 
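The 400 example above shows how validation failures come back: a JSON object mapping field names to lists of messages. A sketch of creating a source and surfacing those errors (field names come from the spec; ids and values are illustrative):

```python
import requests

BASE = "https://controller.example.com"
HEADERS = {"Authorization": "Bearer <token>"}

payload = {
    "name": "example-scm-source",        # required
    "inventory": 1,                      # required (id)
    "source": "scm",                     # one of the documented choices
    "source_project": 5,                 # project containing the inventory file
    "source_path": "inventories/hosts.yml",
    "overwrite": True,
}
resp = requests.post(f"{BASE}/api/v2/inventory_sources/", json=payload, headers=HEADERS)
if resp.status_code == 400:
    # e.g. {"source": ["\"\" is not a valid choice."]}
    for field, messages in resp.json().items():
        print(f"{field}: {'; '.join(messages)}")
else:
    resp.raise_for_status()              # 201 on success
    print("created inventory source", resp.json()["id"])
```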
If the key is not found then the host will be enabled (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `none`: No External Source\n* `execution_environment`: The container image to be used for execution. (id)\n* `inventory`: (id)\n* `update_on_launch`: (boolean)\n* `update_cache_timeout`: (integer)\n* `source_project`: Project containing inventory file used as source. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n\n\n# Update an Inventory Source:\n\nMake a PUT or PATCH request to this resource to update this\ninventory source. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this inventory source. (string, required)\n* `description`: Optional description of this inventory source. (string, default=`\"\"`)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string, default=`\"\"`)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string, default=`\"\"`)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string, default=`\"\"`)\n* `credential`: Cloud credential to use for inventory updates. (integer, default=`None`)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string, default=`\"\"`)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state"and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string, default=`\"\"`)\n* `host_filter`: This field is deprecated and will be removed in a future release. 
Regex where only matching hosts will be imported. (string, default=`\"\"`)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean, default=`False`)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean, default=`False`)\n\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer, default=`0`)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO) (default)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string, default=`\"\"`)\n\n\n\n\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `inventory`: (id, required)\n* `update_on_launch`: (boolean, default=`False`)\n* `update_cache_timeout`: (integer, default=`0`)\n* `source_project`: Project containing inventory file used as source. (id, default=``)\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Inventory Source:\n\nMake a DELETE request to this resource to delete this inventory source.", + "operationId": "api_inventory_sources_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single inventory source", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory source. (integer)\n* `type`: Data type for this inventory source. (choice)\n* `url`: URL for this inventory source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory source was created. (datetime)\n* `modified`: Timestamp when this inventory source was last modified. (datetime)\n* `name`: Name of this inventory source. (string)\n* `description`: Optional description of this inventory source. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. 
For example if enabled_var="status.power_state"and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `none`: No External Source\n* `execution_environment`: The container image to be used for execution. (id)\n* `inventory`: (id)\n* `update_on_launch`: (boolean)\n* `update_cache_timeout`: (integer)\n* `source_project`: Project containing inventory file used as source. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n\n\n# Update an Inventory Source:\n\nMake a PUT or PATCH request to this resource to update this\ninventory source. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this inventory source. (string, required)\n* `description`: Optional description of this inventory source. (string, default=`\"\"`)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string, default=`\"\"`)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string, default=`\"\"`)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string, default=`\"\"`)\n* `credential`: Cloud credential to use for inventory updates. (integer, default=`None`)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string, default=`\"\"`)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. 
For example if enabled_var="status.power_state"and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string, default=`\"\"`)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string, default=`\"\"`)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean, default=`False`)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean, default=`False`)\n\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer, default=`0`)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO) (default)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string, default=`\"\"`)\n\n\n\n\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `inventory`: (id, required)\n* `update_on_launch`: (boolean, default=`False`)\n* `update_cache_timeout`: (integer, default=`0`)\n* `source_project`: Project containing inventory file used as source. (id, default=``)\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Inventory Source:\n\nMake a DELETE request to this resource to delete this inventory source.", + "operationId": "api_inventory_sources_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/InventorySource" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single inventory source", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory source. (integer)\n* `type`: Data type for this inventory source. (choice)\n* `url`: URL for this inventory source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory source was created. (datetime)\n* `modified`: Timestamp when this inventory source was last modified. (datetime)\n* `name`: Name of this inventory source. (string)\n* `description`: Optional description of this inventory source. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. 
(string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state"and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `none`: No External Source\n* `execution_environment`: The container image to be used for execution. (id)\n* `inventory`: (id)\n* `update_on_launch`: (boolean)\n* `update_cache_timeout`: (integer)\n* `source_project`: Project containing inventory file used as source. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n\n\n# Update an Inventory Source:\n\nMake a PUT or PATCH request to this resource to update this\ninventory source. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this inventory source. (string, required)\n* `description`: Optional description of this inventory source. (string, default=`\"\"`)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string, default=`\"\"`)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string, default=`\"\"`)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string, default=`\"\"`)\n* `credential`: Cloud credential to use for inventory updates. 
(integer, default=`None`)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string, default=`\"\"`)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state"and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string, default=`\"\"`)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string, default=`\"\"`)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean, default=`False`)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean, default=`False`)\n\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer, default=`0`)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO) (default)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string, default=`\"\"`)\n\n\n\n\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `inventory`: (id, required)\n* `update_on_launch`: (boolean, default=`False`)\n* `update_cache_timeout`: (integer, default=`0`)\n* `source_project`: Project containing inventory file used as source. (id, default=``)\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Inventory Source:\n\nMake a DELETE request to this resource to delete this inventory source.", + "operationId": "api_inventory_sources_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/InventorySource" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/InventorySource" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single inventory source", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory source. (integer)\n* `type`: Data type for this inventory source. (choice)\n* `url`: URL for this inventory source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory source was created. (datetime)\n* `modified`: Timestamp when this inventory source was last modified. (datetime)\n* `name`: Name of this inventory source. (string)\n* `description`: Optional description of this inventory source. 
(string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state"and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `none`: No External Source\n* `execution_environment`: The container image to be used for execution. (id)\n* `inventory`: (id)\n* `update_on_launch`: (boolean)\n* `update_cache_timeout`: (integer)\n* `source_project`: Project containing inventory file used as source. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n\n\n# Update an Inventory Source:\n\nMake a PUT or PATCH request to this resource to update this\ninventory source. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this inventory source. (string, required)\n* `description`: Optional description of this inventory source. 
(string, default=`\"\"`)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string, default=`\"\"`)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string, default=`\"\"`)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string, default=`\"\"`)\n* `credential`: Cloud credential to use for inventory updates. (integer, default=`None`)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string, default=`\"\"`)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state"and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"}The host would be marked enabled. If power_state where any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string, default=`\"\"`)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string, default=`\"\"`)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean, default=`False`)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean, default=`False`)\n\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer, default=`0`)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO) (default)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string, default=`\"\"`)\n\n\n\n\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `inventory`: (id, required)\n* `update_on_launch`: (boolean, default=`False`)\n* `update_cache_timeout`: (integer, default=`0`)\n* `source_project`: Project containing inventory file used as source. 
(id, default=``)\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Inventory Source:\n\nMake a DELETE request to this resource to delete this inventory source.", + "operationId": "api_inventory_sources_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/InventorySource" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/InventorySource" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single inventory source", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventory_sources/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
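The detail operations above draw the distinction spelled out in the description: a PUT must include all modifiable fields, a PATCH only the fields being changed. A sketch (id and values illustrative; DRF-style serializers are assumed to ignore read-only fields echoed back on write):

```python
import requests

BASE = "https://controller.example.com"
HEADERS = {"Authorization": "Bearer <token>"}
url = f"{BASE}/api/v2/inventory_sources/42/"

# PATCH: send only the fields being modified.
requests.patch(url, json={"update_on_launch": True}, headers=HEADERS).raise_for_status()

# PUT: read-modify-write so that all fields are present in the request.
current = requests.get(url, headers=HEADERS).json()
current["timeout"] = 600
requests.put(url, json=current, headers=HEADERS).raise_for_status()
```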
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_sources_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventory_sources/{id}/credentials/": { + "get": { + "description": "credentials associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. 
(json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_sources_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Credential" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credentials associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. 
(string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_sources_credentials_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Credential" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Credential" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventory_sources/{id}/groups/": { + "delete": { + "description": "groups associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
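The credentials sub-resource above supports POST as well as GET. The spec only says the body is a Credential; in AWX-style related endpoints an existing credential is typically associated by POSTing just its `id`, which is the (assumed) convention sketched here:

```python
import requests

BASE = "https://controller.example.com"
HEADERS = {"Authorization": "Bearer <token>"}
url = f"{BASE}/api/v2/inventory_sources/42/credentials/"

# List credentials already attached to inventory source 42.
for cred in requests.get(url, headers=HEADERS).json()["results"]:
    print(cred["id"], cred["name"])

# Assumed association convention: POST the id of an existing credential.
requests.post(url, json={"id": 7}, headers=HEADERS).raise_for_status()
```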
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_sources_groups_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "get": { + "description": "groups associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more group records. \n\n## Results\n\nEach group data structure includes the following fields:\n\n* `id`: Database ID for this group. (integer)\n* `type`: Data type for this group. (choice)\n* `url`: URL for this group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this group was created. (datetime)\n* `modified`: Timestamp when this group was last modified. (datetime)\n* `name`: Name of this group. (string)\n* `description`: Optional description of this group. (string)\n* `inventory`: (id)\n* `variables`: Group variables in JSON or YAML format. (json)\n\n\n\n## Sorting\n\nTo specify that groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_sources_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Group" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventory_sources/{id}/hosts/": { + "delete": { + "description": "hosts associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of hosts\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more host records. \n\n## Results\n\nEach host data structure includes the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. (string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. 
(datetime)\n\n\n\n## Sorting\n\nTo specify that hosts are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_sources_hosts_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "get": { + "description": "hosts associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of hosts\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more host records. \n\n## Results\n\nEach host data structure includes the following fields:\n\n* `id`: Database ID for this host. (integer)\n* `type`: Data type for this host. (choice)\n* `url`: URL for this host. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this host was created. (datetime)\n* `modified`: Timestamp when this host was last modified. (datetime)\n* `name`: Name of this host. (string)\n* `description`: Optional description of this host. (string)\n* `inventory`: (id)\n* `enabled`: Is this host online and available for running jobs? (boolean)\n* `instance_id`: The value used by the remote inventory source to uniquely identify the host (string)\n* `variables`: Host variables in JSON or YAML format. (json)\n* `has_active_failures`: (field)\n* `has_inventory_sources`: (field)\n* `last_job`: (id)\n* `last_job_host_summary`: (id)\n* `ansible_facts_modified`: The date and time ansible_facts was last modified. (datetime)\n\n\n\n## Sorting\n\nTo specify that hosts are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
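Unusually for a list endpoint, the hosts sub-resource above also accepts DELETE, and its 403 example shows the permission failure mode. A hedged sketch (the bulk semantics, removing the hosts this source imported, are an assumption about AWX behavior rather than something the spec states):

```python
import requests

BASE = "https://controller.example.com"
HEADERS = {"Authorization": "Bearer <token>"}

resp = requests.delete(f"{BASE}/api/v2/inventory_sources/42/hosts/", headers=HEADERS)
if resp.status_code == 403:
    print(resp.json()["detail"])  # "You do not have permission to perform this action."
else:
    resp.raise_for_status()       # expect 204 No Content on success
```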
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", +                "operationId": "api_inventory_sources_hosts_list", +                "parameters": [ +                    { +                        "description": "A search term.", +                        "in": "query", +                        "name": "search", +                        "required": false, +                        "type": "string" +                    }, +                    { +                        "description": "A page number within the paginated result set.", +                        "in": "query", +                        "name": "page", +                        "required": false, +                        "type": "integer" +                    }, +                    { +                        "description": "Number of results to return per page.", +                        "in": "query", +                        "name": "page_size", +                        "required": false, +                        "type": "integer" +                    } +                ], +                "responses": { +                    "200": { +                        "description": "", +                        "schema": { +                            "properties": { +                                "count": { +                                    "type": "integer" +                                }, +                                "next": { +                                    "format": "uri", +                                    "type": "string", +                                    "x-nullable": true +                                }, +                                "previous": { +                                    "format": "uri", +                                    "type": "string", +                                    "x-nullable": true +                                }, +                                "results": { +                                    "items": { +                                        "$ref": "#/definitions/Host" +                                    }, +                                    "type": "array" +                                } +                            }, +                            "required": [ +                                "count", +                                "results" +                            ], +                            "type": "object" +                        } +                    } +                }, +                "summary": "Make a GET request to this resource to retrieve a list of", +                "tags": [ +                    "api" +                ] +            }, +            "parameters": [ +                { +                    "in": "path", +                    "name": "version", +                    "required": true, +                    "type": "string" +                }, +                { +                    "in": "path", +                    "name": "id", +                    "required": true, +                    "type": "string" +                } +            ] +        }, +        "/api/v2/inventory_sources/{id}/inventory_updates/": { +            "get": { +                "description": "inventory updates associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventory updates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory update records. \n\n## Results\n\nEach inventory update data structure includes the following fields:\n\n* `id`: Database ID for this inventory update. (integer)\n* `type`: Data type for this inventory update. (choice)\n* `url`: URL for this inventory update. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory update was created. (datetime)\n* `modified`: Timestamp when this inventory update was last modified. (datetime)\n* `name`: Name of this inventory update. (string)\n* `description`: Optional description of this inventory update. 
(string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state" and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"} The host would be marked enabled. If power_state were any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. 
(integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string)\n* `inventory`: (id)\n* `inventory_source`: (id)\n* `license_error`: (boolean)\n* `org_host_limit_error`: (boolean)\n* `source_project_update`: Inventory files from this Project Update were used for the inventory update. (id)\n* `instance_group`: The Instance group the job was run under (id)\n* `scm_revision`: The SCM Revision from the Project used for this inventory update. Only applicable to inventories sourced from scm (string)\n\n\n\n## Sorting\n\nTo specify that inventory updates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", +                "operationId": "api_inventory_sources_inventory_updates_list", +                "parameters": [ +                    { +                        "description": "A search term.", +                        "in": "query", +                        "name": "search", +                        "required": false, +                        "type": "string" +                    }, +                    { +                        "description": "A page number within the paginated result set.", +                        "in": "query", +                        "name": "page", +                        "required": false, +                        "type": "integer" +                    }, +                    { +                        "description": "Number of results to return per page.", +                        "in": "query", +                        "name": "page_size", +                        "required": false, +                        "type": "integer" +                    } +                ], +                "responses": { +                    "200": { +                        "description": "", +                        "schema": { +                            "properties": { +                                "count": { +                                    "type": "integer" +                                }, +                                "next": { +                                    "format": "uri", +                                    "type": "string", +                                    "x-nullable": true +                                }, +                                "previous": { +                                    "format": "uri", +                                    "type": "string", +                                    "x-nullable": true +                                }, +                                "results": { +                                    "items": { +                                        "$ref": "#/definitions/InventoryUpdateList" +                                    }, +                                    "type": "array" +                                } +                            }, +                            "required": [ +                                "count", +                                "results" +                            ], +                            "type": "object" +                        } +                    } +                }, +                "summary": "Make a GET request to this resource to retrieve a list of", +                "tags": [ +                    "api" +                ] +            }, +            "parameters": [ +                { +                    "in": "path", +                    "name": "version", +                    "required": true, +                    "type": "string" +                }, +                { +                    "in": "path", +                    "name": "id", +                    "required": true, +                    "type": "string" +                } +            ] +        }, +        "/api/v2/inventory_sources/{id}/notification_templates_error/": { +            "get": { +                "description": "notification templates associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. 
\n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_sources_notification_templates_error_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\ninventory source.\n\nThe 
resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", +                "operationId": "api_inventory_sources_notification_templates_error_create", +                "parameters": [ +                    { +                        "in": "body", +                        "name": "data", +                        "required": true, +                        "schema": { +                            "$ref": "#/definitions/NotificationTemplate" +                        } +                    } +                ], +                "responses": { +                    "201": { +                        "description": "", +                        "schema": { +                            "$ref": "#/definitions/NotificationTemplate" +                        } +                    } +                }, +                "summary": "Make a GET request to this resource to retrieve a list of", +                "tags": [ +                    "api" +                ] +            } +        }, +        "/api/v2/inventory_sources/{id}/notification_templates_started/": { +            "get": { +                "description": "notification templates associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. 
The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", +                "operationId": "api_inventory_sources_notification_templates_started_list", +                "parameters": [ +                    { +                        "description": "A search term.", +                        "in": "query", +                        "name": "search", +                        "required": false, +                        "type": "string" +                    }, +                    { +                        "description": "A page number within the paginated result set.", +                        "in": "query", +                        "name": "page", +                        "required": false, +                        "type": "integer" +                    }, +                    { +                        "description": "Number of results to return per page.", +                        "in": "query", +                        "name": "page_size", +                        "required": false, +                        "type": "integer" +                    } +                ], +                "responses": { +                    "200": { +                        "description": "", +                        "schema": { +                            "properties": { +                                "count": { +                                    "type": "integer" +                                }, +                                "next": { +                                    "format": "uri", +                                    "type": "string", +                                    "x-nullable": true +                                }, +                                "previous": { +                                    "format": "uri", +                                    "type": "string", +                                    "x-nullable": true +                                }, +                                "results": { +                                    "items": { +                                        "$ref": "#/definitions/NotificationTemplate" +                                    }, +                                    "type": "array" +                                } +                            }, +                            "required": [ +                                "count", +                                "results" +                            ], +                            "type": "object" +                        } +                    } +                }, +                "summary": "Make a GET request to this resource to retrieve a list of", +                "tags": [ +                    "api" +                ] +            }, +            "parameters": [ +                { +                    "in": "path", +                    "name": "version", +                    "required": true, +                    "type": "string" +                }, +                { +                    "in": "path", +                    "name": "id", +                    "required": true, +                    "type": "string" +                } +            ], +            "post": { +                "description": "notification templates associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. 
(json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", +                "operationId": "api_inventory_sources_notification_templates_started_create", +                "parameters": [ +                    { +                        "in": "body", +                        "name": "data", +                        "required": true, +                        "schema": { +                            "$ref": "#/definitions/NotificationTemplate" +                        } +                    } +                ], +                "responses": { +                    "201": { +                        "description": "", +                        "schema": { +                            "$ref": "#/definitions/NotificationTemplate" +                        } +                    } +                }, +                "summary": "Make a GET request to this resource to retrieve a list of", +                "tags": [ +                    "api" +                ] +            } +        }, +        "/api/v2/inventory_sources/{id}/notification_templates_success/": { +            "get": { +                "description": "notification templates associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. 
(json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", +                "operationId": "api_inventory_sources_notification_templates_success_list", +                "parameters": [ +                    { +                        "description": "A search term.", +                        "in": "query", +                        "name": "search", +                        "required": false, +                        "type": "string" +                    }, +                    { +                        "description": "A page number within the paginated result set.", +                        "in": "query", +                        "name": "page", +                        "required": false, +                        "type": "integer" +                    }, +                    { +                        "description": "Number of results to return per page.", +                        "in": "query", +                        "name": "page_size", +                        "required": false, +                        "type": "integer" +                    } +                ], +                "responses": { +                    "200": { +                        "description": "", +                        "schema": { +                            "properties": { +                                "count": { +                                    "type": "integer" +                                }, +                                "next": { +                                    "format": "uri", +                                    "type": "string", +                                    "x-nullable": true +                                }, +                                "previous": { +                                    "format": "uri", +                                    "type": "string", +                                    "x-nullable": true +                                }, +                                "results": { +                                    "items": { +                                        "$ref": "#/definitions/NotificationTemplate" +                                    }, +                                    "type": "array" +                                } +                            }, +                            "required": [ +                                "count", +                                "results" +                            ], +                            "type": "object" +                        } +                    } +                }, +                "summary": "Make a GET request to this resource to retrieve a list of", +                "tags": [ +                    "api" +                ] +            }, +            "parameters": [ +                { +                    "in": "path", +                    "name": "version", +                    "required": true, +                    "type": "string" +                }, +                { +                    "in": "path", +                    "name": "id", +                    "required": true, +                    "type": "string" +                } +            ], +            "post": { +                "description": "notification templates associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. 
(string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", +                "operationId": "api_inventory_sources_notification_templates_success_create", +                "parameters": [ +                    { +                        "in": "body", +                        "name": "data", +                        "required": true, +                        "schema": { +                            "$ref": "#/definitions/NotificationTemplate" +                        } +                    } +                ], +                "produces": [ +                    "application/json" +                ], +                "responses": { +                    "201": { +                        "description": "", +                        "schema": { +                            "$ref": "#/definitions/NotificationTemplate" +                        } +                    }, +                    "400": { +                        "examples": { +                            "application/json": { +                                "msg": "Notification Templates can only be assigned when source is one of ['azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights', 'scm']." +                            } +                        } +                    } +                }, +                "summary": "Make a GET request to this resource to retrieve a list of", +                "tags": [ +                    "api" +                ] +            } +        }, +        "/api/v2/inventory_sources/{id}/schedules/": { +            "get": { +                "description": "schedules associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of schedules\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more schedule records. \n\n## Results\n\nEach schedule data structure includes the following fields:\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. 
(string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time; afterwards the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned (field)\n\n\n\n## Sorting\n\nTo specify that schedules are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", +                "operationId": "api_inventory_sources_schedules_list", +                "parameters": [ +                    { +                        "description": "A search term.", +                        "in": "query", +                        "name": "search", +                        "required": false, +                        "type": "string" +                    }, +                    { +                        "description": "A page number within the paginated result set.", +                        "in": "query", +                        "name": "page", +                        "required": false, +                        "type": "integer" +                    }, +                    { +                        "description": "Number of results to return per page.", +                        "in": "query", +                        "name": "page_size", +                        "required": false, +                        "type": "integer" +                    } +                ], +                "responses": { +                    "200": { +                        "description": "", +                        "schema": { +                            "properties": { +                                "count": { +                                    "type": "integer" +                                }, +                                "next": { +                                    "format": "uri", +                                    "type": "string", +                                    "x-nullable": true +                                }, +                                "previous": { +                                    "format": "uri", +                                    "type": "string", +                                    "x-nullable": true +                                }, +                                "results": { +                                    "items": { +                                        "$ref": "#/definitions/Schedule" +                                    }, +                                    "type": "array" +                                } +                            }, +                            "required": [ +                                "count", +                                "results" +                            ], +                            "type": "object" +                        } +                    } +                }, +                "summary": "Make a GET request to this resource to retrieve a list of", +                "tags": [ +                    "api" +                ] +            }, +            "parameters": [ +                { +                    "in": "path", +                    "name": "version", +                    "required": true, +                    "type": "string" +                }, +                { +                    "in": "path", +                    "name": "id", +                    "required": true, +                    "type": "string" +                } +            ], +            "post": { +                "description": "schedules associated with the selected\ninventory source.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of schedules\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more schedule records. \n\n## Results\n\nEach schedule data structure includes the following fields:\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. 
(string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time; afterwards the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned (field)\n\n\n\n## Sorting\n\nTo specify that schedules are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", +                "operationId": "api_inventory_sources_schedules_create", +                "parameters": [ +                    { +                        "in": "body", +                        "name": "data", +                        "required": true, +                        "schema": { +                            "$ref": "#/definitions/Schedule" +                        } +                    } +                ], +                "responses": { +                    "201": { +                        "description": "", +                        "schema": { +                            "$ref": "#/definitions/Schedule" +                        } +                    } +                }, +                "summary": "Make a GET request to this resource to retrieve a list of", +                "tags": [ +                    "api" +                ] +            } +        }, +        "/api/v2/inventory_sources/{id}/update/": { +            "get": { +                "description": "from its inventory source. The response will include the following field:\n\n* `can_update`: Flag indicating if this inventory source can be updated\n (boolean, read-only)\n\nMake a POST request to this resource to update the inventory source. If\nsuccessful, the response status code will be 202. 
If the inventory source is\nnot defined or cannot be updated, a 405 status code will be returned.", +                "operationId": "api_inventory_sources_update_read", +                "parameters": [], +                "responses": { +                    "200": { +                        "description": "", +                        "schema": { +                            "$ref": "#/definitions/InventorySourceUpdate" +                        } +                    } +                }, +                "summary": "Make a GET request to this resource to determine if the inventory source can be updated", +                "tags": [ +                    "api" +                ] +            }, +            "parameters": [ +                { +                    "in": "path", +                    "name": "version", +                    "required": true, +                    "type": "string" +                }, +                { +                    "in": "path", +                    "name": "id", +                    "required": true, +                    "type": "string" +                } +            ], +            "post": { +                "description": "from its inventory source. The response will include the following field:\n\n* `can_update`: Flag indicating if this inventory source can be updated\n (boolean, read-only)\n\nMake a POST request to this resource to update the inventory source. If\nsuccessful, the response status code will be 202. If the inventory source is\nnot defined or cannot be updated, a 405 status code will be returned.", +                "operationId": "api_inventory_sources_update_create", +                "parameters": [ +                    { +                        "in": "body", +                        "name": "data", +                        "required": true, +                        "schema": { +                            "$ref": "#/definitions/InventorySourceUpdate" +                        } +                    } +                ], +                "produces": [ +                    "application/json" +                ], +                "responses": { +                    "201": { +                        "description": "", +                        "schema": { +                            "$ref": "#/definitions/InventorySourceUpdate" +                        } +                    }, +                    "202": { +                        "examples": { +                            "application/json": { +                                "canceled_on": null, +                                "controller_node": "", +                                "created": "2018-02-01T08:00:00.000000Z", +                                "credential": null, +                                "custom_virtualenv": null, +                                "description": "", +                                "elapsed": 0.0, +                                "enabled_value": "", +                                "enabled_var": "", +                                "event_processing_finished": false, +                                "execution_environment": null, +                                "execution_node": "", +                                "failed": false, +                                "finished": null, +                                "host_filter": "", +                                "id": 1, +                                "instance_group": null, +                                "inventory": 1, +                                "inventory_source": 1, +                                "inventory_update": 1, +                                "job_args": "", +                                "job_cwd": "", +                                "job_env": {}, +                                "job_explanation": "", +                                "launch_type": "manual", +                                "launched_by": {}, +                                "license_error": false, +                                "limit": "", +                                "modified": "2018-02-01T08:00:00.000000Z", +                                "name": "test-inv - single-inv-src", +                                "org_host_limit_error": false, +                                "overwrite": false, +                                "overwrite_vars": false, +                                "related": { +                                    "cancel": "/api/v2/inventory_updates/1/cancel/", +                                    "credentials": "/api/v2/inventory_updates/1/credentials/", +                                    "events": "/api/v2/inventory_updates/1/events/", +                                    "inventory": "/api/v2/inventories/1/", +                                    "inventory_source": "/api/v2/inventory_sources/1/", +                                    "notifications": "/api/v2/inventory_updates/1/notifications/", +                                    "stdout": "/api/v2/inventory_updates/1/stdout/", +                                    "unified_job_template": "/api/v2/inventory_sources/1/" +                                }, +                                "result_traceback": "", +                                "scm_branch": "", +                                "scm_revision": "", +                                "source": "ec2", +                                "source_path": "", +                                "source_project": null, +                                "source_project_update": null, +                                "source_vars": "", +                                "started": null, +                                "status": "pending", +                                "summary_fields": { +                                    "credentials": [], +                                    "inventory": { +                                        "description": "", +                                        "has_active_failures": false, +                                        "has_inventory_sources": true, +                                        "hosts_with_active_failures": 0, +                                        "id": 1, +                                        "inventory_sources_with_failures": 0, +                                        "kind": "", +                                        "name": "test-inv", +                                        "organization_id": 1, +                                        "total_groups": 0, +                                        "total_hosts": 0, +                                        "total_inventory_sources": 1 +                                    }, +                                    "inventory_source": { +                                        "id": 1, +                                        "name": "single-inv-src", +                                        "source": "ec2", +                                        "status": "pending" +                                    }, +                                    "organization": { +                                        "description": "test-org-desc", +                                        "id": 1, +                                        "name": "test-org" +                                    }, +                                    "unified_job_template": { 
"description": "", + "id": 1, + "name": "single-inv-src", + "unified_job_type": "inventory_update" + }, + "user_capabilities": { + "delete": false, + "start": false + } + }, + "timeout": 0, + "type": "inventory_update", + "unified_job_template": 1, + "url": "/api/v2/inventory_updates/1/", + "verbosity": 1, + "work_unit_id": null + } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make a GET request to this resource to determine if the group can be updated", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventory_updates/": { + "get": { + "description": "inventory updates.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventory updates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory update records. \n\n## Results\n\nEach inventory update data structure includes the following fields:\n\n* `id`: Database ID for this inventory update. (integer)\n* `type`: Data type for this inventory update. (choice)\n* `url`: URL for this inventory update. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory update was created. (datetime)\n* `modified`: Timestamp when this inventory update was last modified. (datetime)\n* `name`: Name of this inventory update. (string)\n* `description`: Optional description of this inventory update. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. 
(string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state" and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"} The host would be marked enabled. If power_state were any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string)\n* `inventory`: (id)\n* `inventory_source`: (id)\n* `license_error`: (boolean)\n* `org_host_limit_error`: (boolean)\n* `source_project_update`: Inventory files from this Project Update were used for the inventory update. (id)\n* `instance_group`: The Instance group the job was run under (id)\n* `scm_revision`: The SCM Revision from the Project used for this inventory update. Only applicable to inventories sourced from scm (string)\n\n\n\n## Sorting\n\nTo specify that inventory updates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_updates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InventoryUpdateList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventory_updates/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory update. (integer)\n* `type`: Data type for this inventory update. (choice)\n* `url`: URL for this inventory update. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory update was created. (datetime)\n* `modified`: Timestamp when this inventory update was last modified. (datetime)\n* `name`: Name of this inventory update. (string)\n* `description`: Optional description of this inventory update. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. 
The enabled variable may be specified as "foo.bar", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get("foo", {}).get("bar", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var="status.power_state" and enabled_value="powered_on" with host variables:{ "status": { "power_state": "powered_on", "created": "2018-02-01T08:00:00.000000Z:00", "healthy": true }, "name": "foobar", "ip_address": "192.168.2.1"} The host would be marked enabled. If power_state were any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `result_traceback`: (string)\n* `event_processing_finished`: Indicates whether all of the events generated by this unified job have been saved to the database. (boolean)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `inventory`: (id)\n* `inventory_source`: (id)\n* `license_error`: (boolean)\n* `org_host_limit_error`: (boolean)\n* `source_project_update`: Inventory files from this Project Update were used for the inventory update. (id)\n* `instance_group`: The Instance group the job was run under (id)\n* `scm_revision`: The SCM Revision from the Project used for this inventory update. Only applicable to inventories sourced from scm (string)\n* `source_project`: The project used for this job. 
\n\n# Delete an Inventory Update:\n\nMake a DELETE request to this resource to delete this inventory update.", + "operationId": "api_inventory_updates_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "403": { + "examples": { + "application/json": { + "detail": "Cannot delete running job resource." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a single inventory update", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this inventory update. (integer)\n* `type`: Data type for this inventory update. (choice)\n* `url`: URL for this inventory update. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory update was created. (datetime)\n* `modified`: Timestamp when this inventory update was last modified. (datetime)\n* `name`: Name of this inventory update. (string)\n* `description`: Optional description of this inventory update. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as \"foo.bar\", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get(\"foo\", {}).get(\"bar\", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var=\"status.power_state\" and enabled_value=\"powered_on\" with host variables: { \"status\": { \"power_state\": \"powered_on\", \"created\": \"2018-02-01T08:00:00.000000Z:00\", \"healthy\": true }, \"name\": \"foobar\", \"ip_address\": \"192.168.2.1\" } The host would be marked enabled. If power_state were any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled. (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. 
(integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `result_traceback`: (string)\n* `event_processing_finished`: Indicates whether all of the events generated by this unified job have been saved to the database. (boolean)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `inventory`: (id)\n* `inventory_source`: (id)\n* `license_error`: (boolean)\n* `org_host_limit_error`: (boolean)\n* `source_project_update`: Inventory files from this Project Update were used for the inventory update. (id)\n* `instance_group`: The Instance group the job was run under (id)\n* `scm_revision`: The SCM Revision from the Project used for this inventory update. Only applicable to inventories sourced from scm (string)\n* `source_project`: The project used for this job. (field)
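Another hedged sketch under the same hypothetical host, token, and ID: a running update can be stopped through the cancel endpoint documented below, checking `can_cancel` first:

    # Returns {"can_cancel": true} while cancellation is still possible.
    curl -s -H "Authorization: Bearer $TOKEN" \
        https://controller.example.com/api/v2/inventory_updates/42/cancel/

    # Request cancellation with an empty POST body.
    curl -s -X POST -H "Authorization: Bearer $TOKEN" \
        https://controller.example.com/api/v2/inventory_updates/42/cancel/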
\n\n# Delete an Inventory Update:\n\nMake a DELETE request to this resource to delete this inventory update.", + "operationId": "api_inventory_updates_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/InventoryUpdateDetail" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a single inventory update", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventory_updates/{id}/cancel/": { + "get": { + "description": "record containing the following fields:\n\n* `can_cancel`: (boolean)", + "operationId": "api_inventory_updates_cancel_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/InventoryUpdateCancel" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a single inventory update", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "record containing the following fields:\n\n* `can_cancel`: (boolean)", + "operationId": "api_inventory_updates_cancel_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/InventoryUpdateCancel" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/InventoryUpdateCancel" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a single inventory update", + "tags": [ + "api" + ] + } + }, + "/api/v2/inventory_updates/{id}/credentials/": { + "get": { + "description": "credentials associated with the selected\ninventory update.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records.\n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)
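As an illustrative sketch only (same hypothetical host, token, and inventory update ID), the sorting and pagination parameters described next combine on this list like so:

    curl -s -H "Authorization: Bearer $TOKEN" \
        "https://controller.example.com/api/v2/inventory_updates/42/credentials/?order_by=name&page_size=100"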
\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_updates_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Credential" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventory_updates/{id}/events/": { + "get": { + "description": "inventory update events associated with the selected\ninventory update.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventory update events\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory update event records.\n\n## Results\n\nEach inventory update event data structure includes the following fields:\n\n* `id`: Database ID for this inventory update event. (integer)\n* `type`: Data type for this inventory update event. (choice)\n* `url`: URL for this inventory update event. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory update event was created. (datetime)\n* `modified`: Timestamp when this inventory update event was last modified. 
(datetime)\n* `event`: (field)\n* `counter`: (integer)\n* `event_display`: (string)\n* `event_data`: (json)\n* `failed`: (field)\n* `changed`: (field)\n* `uuid`: (string)\n* `stdout`: (string)\n* `start_line`: (integer)\n* `end_line`: (integer)\n* `verbosity`: (integer)\n* `inventory_update`: (id)\n\n\n\n## Sorting\n\nTo specify that inventory update events are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_updates_events_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InventoryUpdateEvent" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventory_updates/{id}/notifications/": { + "get": { + "description": "notifications associated with the selected\ninventory update.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notifications\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification records. \n\n## Results\n\nEach notification data structure includes the following fields:\n\n* `id`: Database ID for this notification. (integer)\n* `type`: Data type for this notification. (choice)\n* `url`: URL for this notification. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. 
The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification was created. (datetime)\n* `modified`: Timestamp when this notification was last modified. (datetime)\n* `notification_template`: (id)\n* `error`: (string)\n* `status`: (choice)\n - `pending`: Pending\n - `successful`: Successful\n - `failed`: Failed\n* `notifications_sent`: (integer)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `recipients`: (string)\n* `subject`: (string)\n* `body`: Notification body (json)\n\n\n\n## Sorting\n\nTo specify that notifications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_inventory_updates_notifications_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Notification" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/inventory_updates/{id}/stdout/": { + "get": { + "description": "inventory update.\n\n## Format\n\nUse the `format` query string parameter to specify the output format.\n\n* Browsable API: `?format=api`\n* HTML: `?format=html`\n* Plain Text: `?format=txt`\n* Plain Text with ANSI color codes: `?format=ansi`\n* JSON structure: `?format=json`\n* Downloaded Plain Text: `?format=txt_download`\n* Downloaded Plain Text with ANSI color codes: `?format=ansi_download`\n\n(_New in Ansible Tower 2.0.0_) When using the Browsable API, HTML and JSON\nformats, the `start_line` and 
`end_line` query string parameters can be used\nto specify a range of line numbers to retrieve.\n\nUse `dark=1` or `dark=0` as a query string parameter to force or disable a\ndark background.\n\nFiles over 1.0\u00a0MB (configurable)\nwill not display in the browser. Use the `txt_download` or `ansi_download`\nformats to download the file directly to view it.", + "operationId": "api_inventory_updates_stdout_read", + "parameters": [], + "produces": [ + "text/plain", + "text/plain", + "application/json", + "text/plain", + "text/plain" + ], + "responses": { + "200": { + "description": "", + "examples": { + "text/plain": "\u30aa0\n\u30aa1\n\u30aa2\n" + }, + "schema": { + "$ref": "#/definitions/UnifiedJobStdout" + } + } + }, + "summary": "Make GET request to this resource to retrieve the stdout from running this", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/job_events/{id}/": { + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this job event. (integer)\n* `type`: Data type for this job event. (choice)\n* `url`: URL for this job event. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job event was created. (datetime)\n* `modified`: Timestamp when this job event was last modified. (datetime)\n* `job`: (id)\n* `event`: (choice)\n - `runner_on_failed`: Host Failed\n - `runner_on_start`: Host Started\n - `runner_on_ok`: Host OK\n - `runner_on_error`: Host Failure\n - `runner_on_skipped`: Host Skipped\n - `runner_on_unreachable`: Host Unreachable\n - `runner_on_no_hosts`: No Hosts Remaining\n - `runner_on_async_poll`: Host Polling\n - `runner_on_async_ok`: Host Async OK\n - `runner_on_async_failed`: Host Async Failure\n - `runner_item_on_ok`: Item OK\n - `runner_item_on_failed`: Item Failed\n - `runner_item_on_skipped`: Item Skipped\n - `runner_retry`: Host Retry\n - `runner_on_file_diff`: File Difference\n - `playbook_on_start`: Playbook Started\n - `playbook_on_notify`: Running Handlers\n - `playbook_on_include`: Including File\n - `playbook_on_no_hosts_matched`: No Hosts Matched\n - `playbook_on_no_hosts_remaining`: No Hosts Remaining\n - `playbook_on_task_start`: Task Started\n - `playbook_on_vars_prompt`: Variables Prompted\n - `playbook_on_setup`: Gathering Facts\n - `playbook_on_import_for_host`: internal: on Import for Host\n - `playbook_on_not_import_for_host`: internal: on Not Import for Host\n - `playbook_on_play_start`: Play Started\n - `playbook_on_stats`: Playbook Complete\n - `debug`: Debug\n - `verbose`: Verbose\n - `deprecated`: Deprecated\n - `warning`: Warning\n - `system_warning`: System Warning\n - `error`: Error\n* `counter`: (integer)\n* `event_display`: (string)\n* `event_data`: (json)\n* `event_level`: (integer)\n* `failed`: (boolean)\n* `changed`: (boolean)\n* `uuid`: (string)\n* `parent_uuid`: (string)\n* `host`: (id)\n* `host_name`: (string)\n* `playbook`: (string)\n* `play`: (string)\n* `task`: (string)\n* `role`: (string)\n* `stdout`: (string)\n* `start_line`: (integer)\n* `end_line`: (integer)\n* `verbosity`: (integer)", + "operationId": "api_job_events_read", + "parameters": [], + "responses": { + "200": { + "description": 
"", + "schema": { + "$ref": "#/definitions/JobEvent" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single job event", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/job_events/{id}/children/": { + "get": { + "description": "job events associated with the selected\njob event.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of job events\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job event records. \n\n## Results\n\nEach job event data structure includes the following fields:\n\n* `id`: Database ID for this job event. (integer)\n* `type`: Data type for this job event. (choice)\n* `url`: URL for this job event. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job event was created. (datetime)\n* `modified`: Timestamp when this job event was last modified. (datetime)\n* `job`: (id)\n* `event`: (choice)\n - `runner_on_failed`: Host Failed\n - `runner_on_start`: Host Started\n - `runner_on_ok`: Host OK\n - `runner_on_error`: Host Failure\n - `runner_on_skipped`: Host Skipped\n - `runner_on_unreachable`: Host Unreachable\n - `runner_on_no_hosts`: No Hosts Remaining\n - `runner_on_async_poll`: Host Polling\n - `runner_on_async_ok`: Host Async OK\n - `runner_on_async_failed`: Host Async Failure\n - `runner_item_on_ok`: Item OK\n - `runner_item_on_failed`: Item Failed\n - `runner_item_on_skipped`: Item Skipped\n - `runner_retry`: Host Retry\n - `runner_on_file_diff`: File Difference\n - `playbook_on_start`: Playbook Started\n - `playbook_on_notify`: Running Handlers\n - `playbook_on_include`: Including File\n - `playbook_on_no_hosts_matched`: No Hosts Matched\n - `playbook_on_no_hosts_remaining`: No Hosts Remaining\n - `playbook_on_task_start`: Task Started\n - `playbook_on_vars_prompt`: Variables Prompted\n - `playbook_on_setup`: Gathering Facts\n - `playbook_on_import_for_host`: internal: on Import for Host\n - `playbook_on_not_import_for_host`: internal: on Not Import for Host\n - `playbook_on_play_start`: Play Started\n - `playbook_on_stats`: Playbook Complete\n - `debug`: Debug\n - `verbose`: Verbose\n - `deprecated`: Deprecated\n - `warning`: Warning\n - `system_warning`: System Warning\n - `error`: Error\n* `counter`: (integer)\n* `event_display`: (string)\n* `event_data`: (json)\n* `event_level`: (integer)\n* `failed`: (boolean)\n* `changed`: (boolean)\n* `uuid`: (string)\n* `parent_uuid`: (string)\n* `host`: (id)\n* `host_name`: (string)\n* `playbook`: (string)\n* `play`: (string)\n* `task`: (string)\n* `role`: (string)\n* `stdout`: (string)\n* `start_line`: (integer)\n* `end_line`: (integer)\n* `verbosity`: (integer)\n\n\n\n## Sorting\n\nTo specify that job events are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting 
fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_events_children_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/JobEvent" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/job_host_summaries/{id}/": { + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this job host summary. (integer)\n* `type`: Data type for this job host summary. (choice)\n* `url`: URL for this job host summary. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job host summary was created. (datetime)\n* `modified`: Timestamp when this job host summary was last modified. (datetime)\n* `job`: (id)\n* `host`: (id)\n* `constructed_host`: Only for jobs run against constructed inventories, this links to the host inside the constructed inventory. 
(id)\n* `host_name`: (string)\n* `changed`: (integer)\n* `dark`: (integer)\n* `failures`: (integer)\n* `ok`: (integer)\n* `processed`: (integer)\n* `skipped`: (integer)\n* `failed`: (boolean)\n* `ignored`: (integer)\n* `rescued`: (integer)", + "operationId": "api_job_host_summaries_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/JobHostSummary" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single job host summary", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/job_templates/": { + "get": { + "description": "job templates.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of job templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job template records. \n\n## Results\n\nEach job template data structure includes the following fields:\n\n* `id`: Database ID for this job template. (integer)\n* `type`: Data type for this job template. (choice)\n* `url`: URL for this job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job template was created. (datetime)\n* `modified`: Timestamp when this job template was last modified. (datetime)\n* `name`: Name of this job template. (string)\n* `description`: Optional description of this job template. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this template. (id)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n* `execution_environment`: The container image to be used for execution. 
(id)\n* `host_config_key`: (string)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_diff_mode_on_launch`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_job_type_on_launch`: (boolean)\n* `ask_verbosity_on_launch`: (boolean)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_credential_on_launch`: (boolean)\n* `ask_execution_environment_on_launch`: (boolean)\n* `ask_labels_on_launch`: (boolean)\n* `ask_forks_on_launch`: (boolean)\n* `ask_job_slice_count_on_launch`: (boolean)\n* `ask_timeout_on_launch`: (boolean)\n* `ask_instance_groups_on_launch`: (boolean)\n* `survey_enabled`: (boolean)\n* `become_enabled`: (boolean)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean)\n* `allow_simultaneous`: (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instances groups to run on.If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n## Sorting\n\nTo specify that job templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
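As a hedged illustration of these list parameters (editorial sketch; the host and `$TOKEN` are hypothetical, and `deploy` is an arbitrary search term):

    curl -s -H "Authorization: Bearer $TOKEN" \
        "https://controller.example.com/api/v2/job_templates/?order_by=-modified&page_size=100&page=2&search=deploy"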
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/JobTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "job templates.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of job templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job template records. \n\n## Results\n\nEach job template data structure includes the following fields:\n\n* `id`: Database ID for this job template. (integer)\n* `type`: Data type for this job template. (choice)\n* `url`: URL for this job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job template was created. (datetime)\n* `modified`: Timestamp when this job template was last modified. (datetime)\n* `name`: Name of this job template. (string)\n* `description`: Optional description of this job template. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. 
(integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this template. (id)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n* `execution_environment`: The container image to be used for execution. (id)\n* `host_config_key`: (string)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_diff_mode_on_launch`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_job_type_on_launch`: (boolean)\n* `ask_verbosity_on_launch`: (boolean)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_credential_on_launch`: (boolean)\n* `ask_execution_environment_on_launch`: (boolean)\n* `ask_labels_on_launch`: (boolean)\n* `ask_forks_on_launch`: (boolean)\n* `ask_job_slice_count_on_launch`: (boolean)\n* `ask_timeout_on_launch`: (boolean)\n* `ask_instance_groups_on_launch`: (boolean)\n* `survey_enabled`: (boolean)\n* `become_enabled`: (boolean)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean)\n* `allow_simultaneous`: (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instances groups to run on.If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n## Sorting\n\nTo specify that job templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
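For the create operation itself, a minimal hedged sketch (hypothetical host, token, template name, and resource IDs; the field names come from the list above):

    curl -s -X POST -H "Authorization: Bearer $TOKEN" \
        -H "Content-Type: application/json" \
        -d '{"name": "example-template", "job_type": "run", "inventory": 1, "project": 1, "playbook": "site.yml"}' \
        https://controller.example.com/api/v2/job_templates/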
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/JobTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/JobTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this job template. (integer)\n* `type`: Data type for this job template. (choice)\n* `url`: URL for this job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job template was created. (datetime)\n* `modified`: Timestamp when this job template was last modified. (datetime)\n* `name`: Name of this job template. (string)\n* `description`: Optional description of this job template. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this template. (id)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n* `execution_environment`: The container image to be used for execution. 
(id)\n* `host_config_key`: (string)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_diff_mode_on_launch`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_job_type_on_launch`: (boolean)\n* `ask_verbosity_on_launch`: (boolean)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_credential_on_launch`: (boolean)\n* `ask_execution_environment_on_launch`: (boolean)\n* `ask_labels_on_launch`: (boolean)\n* `ask_forks_on_launch`: (boolean)\n* `ask_job_slice_count_on_launch`: (boolean)\n* `ask_timeout_on_launch`: (boolean)\n* `ask_instance_groups_on_launch`: (boolean)\n* `survey_enabled`: (boolean)\n* `become_enabled`: (boolean)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean)\n* `allow_simultaneous`: (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instances groups to run on.If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n\n\n# Update a Job Template:\n\nMake a PUT or PATCH request to this resource to update this\njob template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this job template. (string, required)\n* `description`: Optional description of this job template. (string, default=`\"\"`)\n* `job_type`: (choice)\n - `run`: Run (default)\n - `check`: Check\n* `inventory`: (id, default=``)\n* `project`: (id, default=``)\n* `playbook`: (string, default=`\"\"`)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. (string, default=`\"\"`)\n* `forks`: (integer, default=`0`)\n* `limit`: (string, default=`\"\"`)\n* `verbosity`: (choice)\n - `0`: 0 (Normal) (default)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json, default=``)\n* `job_tags`: (string, default=`\"\"`)\n* `force_handlers`: (boolean, default=`False`)\n* `skip_tags`: (string, default=`\"\"`)\n* `start_at_task`: (string, default=`\"\"`)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer, default=`0`)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean, default=`False`)\n\n\n\n\n\n* `execution_environment`: The container image to be used for execution. 
(id, default=``)\n* `host_config_key`: (string, default=`\"\"`)\n* `ask_scm_branch_on_launch`: (boolean, default=`False`)\n* `ask_diff_mode_on_launch`: (boolean, default=`False`)\n* `ask_variables_on_launch`: (boolean, default=`False`)\n* `ask_limit_on_launch`: (boolean, default=`False`)\n* `ask_tags_on_launch`: (boolean, default=`False`)\n* `ask_skip_tags_on_launch`: (boolean, default=`False`)\n* `ask_job_type_on_launch`: (boolean, default=`False`)\n* `ask_verbosity_on_launch`: (boolean, default=`False`)\n* `ask_inventory_on_launch`: (boolean, default=`False`)\n* `ask_credential_on_launch`: (boolean, default=`False`)\n* `ask_execution_environment_on_launch`: (boolean, default=`False`)\n* `ask_labels_on_launch`: (boolean, default=`False`)\n* `ask_forks_on_launch`: (boolean, default=`False`)\n* `ask_job_slice_count_on_launch`: (boolean, default=`False`)\n* `ask_timeout_on_launch`: (boolean, default=`False`)\n* `ask_instance_groups_on_launch`: (boolean, default=`False`)\n* `survey_enabled`: (boolean, default=`False`)\n* `become_enabled`: (boolean, default=`False`)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean, default=`False`)\n* `allow_simultaneous`: (boolean, default=`False`)\n\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer, default=`1`)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id, default=``)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instances groups to run on.If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean, default=`False`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Job Template:\n\nMake a DELETE request to this resource to delete this job template.", + "operationId": "api_job_templates_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single job template", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this job template. (integer)\n* `type`: Data type for this job template. (choice)\n* `url`: URL for this job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job template was created. (datetime)\n* `modified`: Timestamp when this job template was last modified. (datetime)\n* `name`: Name of this job template. (string)\n* `description`: Optional description of this job template. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. 
(string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this template. (id)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n* `execution_environment`: The container image to be used for execution. (id)\n* `host_config_key`: (string)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_diff_mode_on_launch`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_job_type_on_launch`: (boolean)\n* `ask_verbosity_on_launch`: (boolean)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_credential_on_launch`: (boolean)\n* `ask_execution_environment_on_launch`: (boolean)\n* `ask_labels_on_launch`: (boolean)\n* `ask_forks_on_launch`: (boolean)\n* `ask_job_slice_count_on_launch`: (boolean)\n* `ask_timeout_on_launch`: (boolean)\n* `ask_instance_groups_on_launch`: (boolean)\n* `survey_enabled`: (boolean)\n* `become_enabled`: (boolean)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean)\n* `allow_simultaneous`: (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instances groups to run on.If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n\n\n# Update a Job Template:\n\nMake a PUT or PATCH request to this resource to update this\njob template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this job template. (string, required)\n* `description`: Optional description of this job template. (string, default=`\"\"`)\n* `job_type`: (choice)\n - `run`: Run (default)\n - `check`: Check\n* `inventory`: (id, default=``)\n* `project`: (id, default=``)\n* `playbook`: (string, default=`\"\"`)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. 
(string, default=`\"\"`)\n* `forks`: (integer, default=`0`)\n* `limit`: (string, default=`\"\"`)\n* `verbosity`: (choice)\n - `0`: 0 (Normal) (default)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json, default=``)\n* `job_tags`: (string, default=`\"\"`)\n* `force_handlers`: (boolean, default=`False`)\n* `skip_tags`: (string, default=`\"\"`)\n* `start_at_task`: (string, default=`\"\"`)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer, default=`0`)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean, default=`False`)\n\n\n\n\n\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `host_config_key`: (string, default=`\"\"`)\n* `ask_scm_branch_on_launch`: (boolean, default=`False`)\n* `ask_diff_mode_on_launch`: (boolean, default=`False`)\n* `ask_variables_on_launch`: (boolean, default=`False`)\n* `ask_limit_on_launch`: (boolean, default=`False`)\n* `ask_tags_on_launch`: (boolean, default=`False`)\n* `ask_skip_tags_on_launch`: (boolean, default=`False`)\n* `ask_job_type_on_launch`: (boolean, default=`False`)\n* `ask_verbosity_on_launch`: (boolean, default=`False`)\n* `ask_inventory_on_launch`: (boolean, default=`False`)\n* `ask_credential_on_launch`: (boolean, default=`False`)\n* `ask_execution_environment_on_launch`: (boolean, default=`False`)\n* `ask_labels_on_launch`: (boolean, default=`False`)\n* `ask_forks_on_launch`: (boolean, default=`False`)\n* `ask_job_slice_count_on_launch`: (boolean, default=`False`)\n* `ask_timeout_on_launch`: (boolean, default=`False`)\n* `ask_instance_groups_on_launch`: (boolean, default=`False`)\n* `survey_enabled`: (boolean, default=`False`)\n* `become_enabled`: (boolean, default=`False`)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean, default=`False`)\n* `allow_simultaneous`: (boolean, default=`False`)\n\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer, default=`1`)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id, default=``)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instances groups to run on.If this setting is enabled and you provided an empty list, the global instance groups will be applied. 
(boolean, default=`False`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Job Template:\n\nMake a DELETE request to this resource to delete this job template.", + "operationId": "api_job_templates_read", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "allow_simultaneous": false, + "ask_credential_on_launch": false, + "ask_diff_mode_on_launch": false, + "ask_execution_environment_on_launch": false, + "ask_forks_on_launch": false, + "ask_instance_groups_on_launch": false, + "ask_inventory_on_launch": false, + "ask_job_slice_count_on_launch": false, + "ask_job_type_on_launch": false, + "ask_labels_on_launch": false, + "ask_limit_on_launch": false, + "ask_scm_branch_on_launch": false, + "ask_skip_tags_on_launch": false, + "ask_tags_on_launch": false, + "ask_timeout_on_launch": false, + "ask_variables_on_launch": false, + "ask_verbosity_on_launch": false, + "become_enabled": false, + "created": "2018-02-01T08:00:00.000000Z", + "custom_virtualenv": null, + "description": "", + "diff_mode": false, + "execution_environment": null, + "extra_vars": "", + "force_handlers": false, + "forks": 0, + "host_config_key": "", + "id": 1, + "inventory": null, + "job_slice_count": 1, + "job_tags": "", + "job_type": "run", + "last_job_failed": false, + "last_job_run": null, + "limit": "", + "modified": "2018-02-01T08:00:00.000000Z", + "name": "test-job_template", + "next_job_run": null, + "organization": null, + "playbook": "", + "prevent_instance_group_fallback": false, + "project": null, + "related": { + "access_list": "/api/v2/job_templates/1/access_list/", + "activity_stream": "/api/v2/job_templates/1/activity_stream/", + "copy": "/api/v2/job_templates/1/copy/", + "credentials": "/api/v2/job_templates/1/credentials/", + "instance_groups": "/api/v2/job_templates/1/instance_groups/", + "jobs": "/api/v2/job_templates/1/jobs/", + "labels": "/api/v2/job_templates/1/labels/", + "launch": "/api/v2/job_templates/1/launch/", + "notification_templates_error": "/api/v2/job_templates/1/notification_templates_error/", + "notification_templates_started": "/api/v2/job_templates/1/notification_templates_started/", + "notification_templates_success": "/api/v2/job_templates/1/notification_templates_success/", + "object_roles": "/api/v2/job_templates/1/object_roles/", + "schedules": "/api/v2/job_templates/1/schedules/", + "slice_workflow_jobs": "/api/v2/job_templates/1/slice_workflow_jobs/", + "survey_spec": "/api/v2/job_templates/1/survey_spec/", + "webhook_key": "/api/v2/job_templates/1/webhook_key/", + "webhook_receiver": "" + }, + "scm_branch": "", + "skip_tags": "", + "start_at_task": "", + "status": "never updated", + "summary_fields": { + "credentials": [], + "labels": { + "count": 0, + "results": [] + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the job template", + "id": 1, + "name": "Admin" + }, + "execute_role": { + "description": "May run the job template", + "id": 2, + "name": "Execute" + }, + "read_role": { + "description": "May view settings for the job template", + "id": 3, + "name": "Read" + } + }, + "recent_jobs": [], + "user_capabilities": { + "copy": false, + "delete": true, + "edit": true, + "schedule": false, + "start": false + } + }, + "survey_enabled": false, + "timeout": 0, + "type": "job_template", + "url": "/api/v2/job_templates/1/", + 
"use_fact_cache": false, + "verbosity": 0, + "webhook_credential": null, + "webhook_service": "" + } + }, + "schema": { + "$ref": "#/definitions/JobTemplate" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single job template", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this job template. (integer)\n* `type`: Data type for this job template. (choice)\n* `url`: URL for this job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job template was created. (datetime)\n* `modified`: Timestamp when this job template was last modified. (datetime)\n* `name`: Name of this job template. (string)\n* `description`: Optional description of this job template. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this template. (id)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n* `execution_environment`: The container image to be used for execution. 
(id)\n* `host_config_key`: (string)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_diff_mode_on_launch`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_job_type_on_launch`: (boolean)\n* `ask_verbosity_on_launch`: (boolean)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_credential_on_launch`: (boolean)\n* `ask_execution_environment_on_launch`: (boolean)\n* `ask_labels_on_launch`: (boolean)\n* `ask_forks_on_launch`: (boolean)\n* `ask_job_slice_count_on_launch`: (boolean)\n* `ask_timeout_on_launch`: (boolean)\n* `ask_instance_groups_on_launch`: (boolean)\n* `survey_enabled`: (boolean)\n* `become_enabled`: (boolean)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean)\n* `allow_simultaneous`: (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instance groups to run on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n\n\n# Update a Job Template:\n\nMake a PUT or PATCH request to this resource to update this\njob template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this job template. (string, required)\n* `description`: Optional description of this job template. (string, default=`\"\"`)\n* `job_type`: (choice)\n - `run`: Run (default)\n - `check`: Check\n* `inventory`: (id, default=``)\n* `project`: (id, default=``)\n* `playbook`: (string, default=`\"\"`)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. (string, default=`\"\"`)\n* `forks`: (integer, default=`0`)\n* `limit`: (string, default=`\"\"`)\n* `verbosity`: (choice)\n - `0`: 0 (Normal) (default)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json, default=``)\n* `job_tags`: (string, default=`\"\"`)\n* `force_handlers`: (boolean, default=`False`)\n* `skip_tags`: (string, default=`\"\"`)\n* `start_at_task`: (string, default=`\"\"`)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer, default=`0`)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean, default=`False`)\n\n\n\n\n\n* `execution_environment`: The container image to be used for execution. 
(id, default=``)\n* `host_config_key`: (string, default=`\"\"`)\n* `ask_scm_branch_on_launch`: (boolean, default=`False`)\n* `ask_diff_mode_on_launch`: (boolean, default=`False`)\n* `ask_variables_on_launch`: (boolean, default=`False`)\n* `ask_limit_on_launch`: (boolean, default=`False`)\n* `ask_tags_on_launch`: (boolean, default=`False`)\n* `ask_skip_tags_on_launch`: (boolean, default=`False`)\n* `ask_job_type_on_launch`: (boolean, default=`False`)\n* `ask_verbosity_on_launch`: (boolean, default=`False`)\n* `ask_inventory_on_launch`: (boolean, default=`False`)\n* `ask_credential_on_launch`: (boolean, default=`False`)\n* `ask_execution_environment_on_launch`: (boolean, default=`False`)\n* `ask_labels_on_launch`: (boolean, default=`False`)\n* `ask_forks_on_launch`: (boolean, default=`False`)\n* `ask_job_slice_count_on_launch`: (boolean, default=`False`)\n* `ask_timeout_on_launch`: (boolean, default=`False`)\n* `ask_instance_groups_on_launch`: (boolean, default=`False`)\n* `survey_enabled`: (boolean, default=`False`)\n* `become_enabled`: (boolean, default=`False`)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean, default=`False`)\n* `allow_simultaneous`: (boolean, default=`False`)\n\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer, default=`1`)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id, default=``)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instance groups to run on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. 
(boolean, default=`False`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Job Template:\n\nMake a DELETE request to this resource to delete this job template.", + "operationId": "api_job_templates_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "webhook_credential": 1 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "allow_simultaneous": false, + "ask_credential_on_launch": false, + "ask_diff_mode_on_launch": false, + "ask_execution_environment_on_launch": false, + "ask_forks_on_launch": false, + "ask_instance_groups_on_launch": false, + "ask_inventory_on_launch": false, + "ask_job_slice_count_on_launch": false, + "ask_job_type_on_launch": false, + "ask_labels_on_launch": false, + "ask_limit_on_launch": false, + "ask_scm_branch_on_launch": false, + "ask_skip_tags_on_launch": false, + "ask_tags_on_launch": false, + "ask_timeout_on_launch": false, + "ask_variables_on_launch": false, + "ask_verbosity_on_launch": false, + "become_enabled": false, + "created": "2018-02-01T08:00:00.000000Z", + "custom_virtualenv": null, + "description": "", + "diff_mode": false, + "execution_environment": null, + "extra_vars": "", + "force_handlers": false, + "forks": 0, + "host_config_key": "", + "id": 2, + "inventory": 1, + "job_slice_count": 1, + "job_tags": "", + "job_type": "run", + "last_job_failed": false, + "last_job_run": null, + "limit": "", + "modified": "2018-02-01T08:00:00.000000Z", + "name": "jt", + "next_job_run": null, + "organization": 1, + "playbook": "helloworld.yml", + "prevent_instance_group_fallback": false, + "project": 1, + "related": { + "access_list": "/api/v2/job_templates/2/access_list/", + "activity_stream": "/api/v2/job_templates/2/activity_stream/", + "copy": "/api/v2/job_templates/2/copy/", + "credentials": "/api/v2/job_templates/2/credentials/", + "instance_groups": "/api/v2/job_templates/2/instance_groups/", + "inventory": "/api/v2/inventories/1/", + "jobs": "/api/v2/job_templates/2/jobs/", + "labels": "/api/v2/job_templates/2/labels/", + "launch": "/api/v2/job_templates/2/launch/", + "notification_templates_error": "/api/v2/job_templates/2/notification_templates_error/", + "notification_templates_started": "/api/v2/job_templates/2/notification_templates_started/", + "notification_templates_success": "/api/v2/job_templates/2/notification_templates_success/", + "object_roles": "/api/v2/job_templates/2/object_roles/", + "organization": "/api/v2/organizations/1/", + "project": "/api/v2/projects/1/", + "schedules": "/api/v2/job_templates/2/schedules/", + "slice_workflow_jobs": "/api/v2/job_templates/2/slice_workflow_jobs/", + "survey_spec": "/api/v2/job_templates/2/survey_spec/", + "webhook_credential": "/api/v2/credentials/1/", + "webhook_key": "/api/v2/job_templates/2/webhook_key/", + "webhook_receiver": "/api/v2/job_templates/2/gitlab/" + }, + "scm_branch": "", + "skip_tags": "", + "start_at_task": "", + "status": "never updated", + "summary_fields": { + "credentials": [], + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test_inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "labels": { + 
"count": 0, + "results": [] + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the job template", + "id": 28, + "name": "Admin" + }, + "execute_role": { + "description": "May run the job template", + "id": 29, + "name": "Execute" + }, + "read_role": { + "description": "May view settings for the job template", + "id": 30, + "name": "Read" + } + }, + "organization": { + "description": "org", + "id": 1, + "name": "org" + }, + "project": { + "allow_override": false, + "description": "test_proj-description", + "id": 1, + "name": "test_proj", + "scm_type": "", + "status": "missing" + }, + "recent_jobs": [], + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true, + "schedule": true, + "start": true + }, + "webhook_credential": { + "cloud": false, + "credential_type_id": 1, + "description": "", + "id": 1, + "kind": "gitlab_token", + "name": "test-cred" + } + }, + "survey_enabled": false, + "timeout": 0, + "type": "job_template", + "url": "/api/v2/job_templates/2/", + "use_fact_cache": false, + "verbosity": 0, + "webhook_credential": 1, + "webhook_service": "gitlab" + } + }, + "schema": { + "$ref": "#/definitions/JobTemplate" + } + }, + "400": { + "examples": { + "application/json": { + "webhook_credential": [ + "Must match the selected webhook service." + ] + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single job template", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this job template. (integer)\n* `type`: Data type for this job template. (choice)\n* `url`: URL for this job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job template was created. (datetime)\n* `modified`: Timestamp when this job template was last modified. (datetime)\n* `name`: Name of this job template. (string)\n* `description`: Optional description of this job template. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this template. (id)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n* `execution_environment`: The container image to be used for execution. 
(id)\n* `host_config_key`: (string)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_diff_mode_on_launch`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_job_type_on_launch`: (boolean)\n* `ask_verbosity_on_launch`: (boolean)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_credential_on_launch`: (boolean)\n* `ask_execution_environment_on_launch`: (boolean)\n* `ask_labels_on_launch`: (boolean)\n* `ask_forks_on_launch`: (boolean)\n* `ask_job_slice_count_on_launch`: (boolean)\n* `ask_timeout_on_launch`: (boolean)\n* `ask_instance_groups_on_launch`: (boolean)\n* `survey_enabled`: (boolean)\n* `become_enabled`: (boolean)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean)\n* `allow_simultaneous`: (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instance groups to run on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n\n\n# Update a Job Template:\n\nMake a PUT or PATCH request to this resource to update this\njob template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this job template. (string, required)\n* `description`: Optional description of this job template. (string, default=`\"\"`)\n* `job_type`: (choice)\n - `run`: Run (default)\n - `check`: Check\n* `inventory`: (id, default=``)\n* `project`: (id, default=``)\n* `playbook`: (string, default=`\"\"`)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. (string, default=`\"\"`)\n* `forks`: (integer, default=`0`)\n* `limit`: (string, default=`\"\"`)\n* `verbosity`: (choice)\n - `0`: 0 (Normal) (default)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json, default=``)\n* `job_tags`: (string, default=`\"\"`)\n* `force_handlers`: (boolean, default=`False`)\n* `skip_tags`: (string, default=`\"\"`)\n* `start_at_task`: (string, default=`\"\"`)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer, default=`0`)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean, default=`False`)\n\n\n\n\n\n* `execution_environment`: The container image to be used for execution. 
(id, default=``)\n* `host_config_key`: (string, default=`\"\"`)\n* `ask_scm_branch_on_launch`: (boolean, default=`False`)\n* `ask_diff_mode_on_launch`: (boolean, default=`False`)\n* `ask_variables_on_launch`: (boolean, default=`False`)\n* `ask_limit_on_launch`: (boolean, default=`False`)\n* `ask_tags_on_launch`: (boolean, default=`False`)\n* `ask_skip_tags_on_launch`: (boolean, default=`False`)\n* `ask_job_type_on_launch`: (boolean, default=`False`)\n* `ask_verbosity_on_launch`: (boolean, default=`False`)\n* `ask_inventory_on_launch`: (boolean, default=`False`)\n* `ask_credential_on_launch`: (boolean, default=`False`)\n* `ask_execution_environment_on_launch`: (boolean, default=`False`)\n* `ask_labels_on_launch`: (boolean, default=`False`)\n* `ask_forks_on_launch`: (boolean, default=`False`)\n* `ask_job_slice_count_on_launch`: (boolean, default=`False`)\n* `ask_timeout_on_launch`: (boolean, default=`False`)\n* `ask_instance_groups_on_launch`: (boolean, default=`False`)\n* `survey_enabled`: (boolean, default=`False`)\n* `become_enabled`: (boolean, default=`False`)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean, default=`False`)\n* `allow_simultaneous`: (boolean, default=`False`)\n\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer, default=`1`)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id, default=``)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instance groups to run on. If this setting is enabled and you provided an empty list, the global instance groups will be applied. 
(boolean, default=`False`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Job Template:\n\nMake a DELETE request to this resource to delete this job template.", + "operationId": "api_job_templates_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "allow_simultaneous": false, + "ask_credential_on_launch": true, + "ask_diff_mode_on_launch": false, + "ask_execution_environment_on_launch": false, + "ask_forks_on_launch": false, + "ask_instance_groups_on_launch": false, + "ask_inventory_on_launch": false, + "ask_job_slice_count_on_launch": false, + "ask_job_type_on_launch": false, + "ask_labels_on_launch": false, + "ask_limit_on_launch": false, + "ask_scm_branch_on_launch": false, + "ask_skip_tags_on_launch": false, + "ask_tags_on_launch": false, + "ask_timeout_on_launch": false, + "ask_variables_on_launch": false, + "ask_verbosity_on_launch": false, + "become_enabled": false, + "created": "2018-02-01T08:00:00.000000Z", + "custom_virtualenv": null, + "description": "", + "diff_mode": false, + "execution_environment": null, + "extra_vars": "", + "force_handlers": false, + "forks": 0, + "host_config_key": "", + "id": 1, + "inventory": 1, + "job_slice_count": 1, + "job_tags": "", + "job_type": "run", + "last_job_failed": false, + "last_job_run": null, + "limit": "", + "modified": "2018-02-01T08:00:00.000000Z", + "name": "test-job_template", + "next_job_run": null, + "organization": 1, + "playbook": "helloworld.yml", + "prevent_instance_group_fallback": false, + "project": 2, + "related": { + "access_list": "/api/v2/job_templates/1/access_list/", + "activity_stream": "/api/v2/job_templates/1/activity_stream/", + "copy": "/api/v2/job_templates/1/copy/", + "credentials": "/api/v2/job_templates/1/credentials/", + "instance_groups": "/api/v2/job_templates/1/instance_groups/", + "inventory": "/api/v2/inventories/1/", + "jobs": "/api/v2/job_templates/1/jobs/", + "labels": "/api/v2/job_templates/1/labels/", + "launch": "/api/v2/job_templates/1/launch/", + "notification_templates_error": "/api/v2/job_templates/1/notification_templates_error/", + "notification_templates_started": "/api/v2/job_templates/1/notification_templates_started/", + "notification_templates_success": "/api/v2/job_templates/1/notification_templates_success/", + "object_roles": "/api/v2/job_templates/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "project": "/api/v2/projects/2/", + "schedules": "/api/v2/job_templates/1/schedules/", + "slice_workflow_jobs": "/api/v2/job_templates/1/slice_workflow_jobs/", + "survey_spec": "/api/v2/job_templates/1/survey_spec/", + "webhook_key": "/api/v2/job_templates/1/webhook_key/", + "webhook_receiver": "" + }, + "scm_branch": "", + "skip_tags": "", + "start_at_task": "", + "status": "never updated", + "summary_fields": { + "credentials": [ + { + "cloud": false, + "description": "", + "id": 2, + "kind": "vault", + "name": "second-vault" + }, + { + "cloud": false, + "description": "", + "id": 1, + "kind": "vault", + "name": "test-cred" + } + ], + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "labels": { + "count": 0, + "results": [] + }, + 
"object_roles": { + "admin_role": { + "description": "Can manage all aspects of the job template", + "id": 2, + "name": "Admin" + }, + "execute_role": { + "description": "May run the job template", + "id": 3, + "name": "Execute" + }, + "read_role": { + "description": "May view settings for the job template", + "id": 4, + "name": "Read" + } + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "project": { + "allow_override": false, + "description": "test-proj-desc", + "id": 2, + "name": "test-proj", + "scm_type": "git", + "status": "never updated" + }, + "recent_jobs": [], + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true, + "schedule": true, + "start": true + } + }, + "survey_enabled": false, + "timeout": 0, + "type": "job_template", + "url": "/api/v2/job_templates/1/", + "use_fact_cache": false, + "verbosity": 0, + "webhook_credential": null, + "webhook_service": "" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "allow_simultaneous": false, + "ask_credential_on_launch": true, + "ask_diff_mode_on_launch": false, + "ask_execution_environment_on_launch": false, + "ask_forks_on_launch": false, + "ask_instance_groups_on_launch": false, + "ask_inventory_on_launch": false, + "ask_job_slice_count_on_launch": false, + "ask_job_type_on_launch": false, + "ask_labels_on_launch": false, + "ask_limit_on_launch": false, + "ask_scm_branch_on_launch": false, + "ask_skip_tags_on_launch": false, + "ask_tags_on_launch": false, + "ask_timeout_on_launch": false, + "ask_variables_on_launch": false, + "ask_verbosity_on_launch": false, + "become_enabled": false, + "created": "2018-02-01T08:00:00.000000Z", + "custom_virtualenv": null, + "description": "", + "diff_mode": false, + "execution_environment": null, + "extra_vars": "", + "force_handlers": false, + "forks": 0, + "host_config_key": "", + "id": 1, + "inventory": 1, + "job_slice_count": 1, + "job_tags": "", + "job_type": "run", + "last_job_failed": false, + "last_job_run": null, + "limit": "", + "modified": "2018-02-01T08:00:00.000000Z", + "name": "test-job_template", + "next_job_run": null, + "organization": 1, + "playbook": "helloworld.yml", + "prevent_instance_group_fallback": false, + "project": 2, + "related": { + "access_list": "/api/v2/job_templates/1/access_list/", + "activity_stream": "/api/v2/job_templates/1/activity_stream/", + "copy": "/api/v2/job_templates/1/copy/", + "credentials": "/api/v2/job_templates/1/credentials/", + "instance_groups": "/api/v2/job_templates/1/instance_groups/", + "inventory": "/api/v2/inventories/1/", + "jobs": "/api/v2/job_templates/1/jobs/", + "labels": "/api/v2/job_templates/1/labels/", + "launch": "/api/v2/job_templates/1/launch/", + "notification_templates_error": "/api/v2/job_templates/1/notification_templates_error/", + "notification_templates_started": "/api/v2/job_templates/1/notification_templates_started/", + "notification_templates_success": "/api/v2/job_templates/1/notification_templates_success/", + "object_roles": "/api/v2/job_templates/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "project": "/api/v2/projects/2/", + "schedules": "/api/v2/job_templates/1/schedules/", + "slice_workflow_jobs": "/api/v2/job_templates/1/slice_workflow_jobs/", + "survey_spec": "/api/v2/job_templates/1/survey_spec/", + "webhook_key": "/api/v2/job_templates/1/webhook_key/", + "webhook_receiver": "" + }, + "scm_branch": "", + "skip_tags": "", 
+ "start_at_task": "", + "status": "never updated", + "summary_fields": { + "credentials": [ + { + "cloud": false, + "description": "", + "id": 2, + "kind": "vault", + "name": "second-vault" + }, + { + "cloud": false, + "description": "", + "id": 1, + "kind": "vault", + "name": "test-cred" + } + ], + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "labels": { + "count": 0, + "results": [] + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the job template", + "id": 2, + "name": "Admin" + }, + "execute_role": { + "description": "May run the job template", + "id": 3, + "name": "Execute" + }, + "read_role": { + "description": "May view settings for the job template", + "id": 4, + "name": "Read" + } + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "project": { + "allow_override": false, + "description": "test-proj-desc", + "id": 2, + "name": "test-proj", + "scm_type": "git", + "status": "never updated" + }, + "recent_jobs": [], + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true, + "schedule": true, + "start": true + } + }, + "survey_enabled": false, + "timeout": 0, + "type": "job_template", + "url": "/api/v2/job_templates/1/", + "use_fact_cache": false, + "verbosity": 0, + "webhook_credential": null, + "webhook_service": "" + } + }, + "schema": { + "$ref": "#/definitions/JobTemplate" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single job template", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/access_list/": { + "get": { + "description": "users.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. 
(string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_access_list_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ResourceAccessListElement" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/job_templates/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). 
(choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/job_templates/{id}/callback/": { + "get": { + "description": "parameter, to start a new job limited to only the requesting host. 
In the\nexamples below, replace the `N` parameter with the `id` of the job template\nand the `HOST_CONFIG_KEY` with the `host_config_key` associated with the\njob template.\n\nFor example, using curl:\n\n curl -H \"Content-Type: application/json\" -d '{\"host_config_key\": \"HOST_CONFIG_KEY\"}' http://server/api/v2/job_templates/N/callback/\n\nOr using wget:\n\n wget -O /dev/null --post-data='{\"host_config_key\": \"HOST_CONFIG_KEY\"}' --header=Content-Type:application/json http://server/api/v2/job_templates/N/callback/\n\nYou may also pass `extra_vars` to the callback:\n\n curl -H \"Content-Type: application/json\" -d '{\"host_config_key\": \"HOST_CONFIG_KEY\", \"extra_vars\": {\"key\": \"value\"}}' http://server/api/v2/job_templates/N/callback/\n\nThe response will return status 202 if the request is valid, 403 for an\ninvalid host config key, or 400 if the host cannot be determined from the\naddress making the request.\n\n_(New in Ansible Tower 2.0.0)_ If the associated inventory has the\n`update_on_launch` flag set and if the `update_cache_timeout` has expired, the\ncallback will perform an inventory sync to find a matching host.\n\nA GET request may be used to verify that the correct host will be selected.\nThis request must authenticate as a valid user with permission to edit the\njob template. For example:\n\n curl http://user:password@server/api/v2/job_templates/N/callback/\n\nThe response will include the host config key as well as the host name(s)\nthat would match the request:\n\n {\n \"host_config_key\": \"HOST_CONFIG_KEY\",\n \"matching_hosts\": [\"hostname\"]\n }", + "operationId": "api_job_templates_callback_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Empty" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Configure a host to POST to this resource, passing the `host_config_key`", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "parameter, to start a new job limited to only the requesting host. 
In the\nexamples below, replace the `N` parameter with the `id` of the job template\nand the `HOST_CONFIG_KEY` with the `host_config_key` associated with the\njob template.\n\nFor example, using curl:\n\n curl -H \"Content-Type: application/json\" -d '{\"host_config_key\": \"HOST_CONFIG_KEY\"}' http://server/api/v2/job_templates/N/callback/\n\nOr using wget:\n\n wget -O /dev/null --post-data='{\"host_config_key\": \"HOST_CONFIG_KEY\"}' --header=Content-Type:application/json http://server/api/v2/job_templates/N/callback/\n\nYou may also pass `extra_vars` to the callback:\n\n curl -H \"Content-Type: application/json\" -d '{\"host_config_key\": \"HOST_CONFIG_KEY\", \"extra_vars\": {\"key\": \"value\"}}' http://server/api/v2/job_templates/N/callback/\n\nThe response will return status 202 if the request is valid, 403 for an\ninvalid host config key, or 400 if the host cannot be determined from the\naddress making the request.\n\n_(New in Ansible Tower 2.0.0)_ If the associated inventory has the\n`update_on_launch` flag set and if the `update_cache_timeout` has expired, the\ncallback will perform an inventory sync to find a matching host.\n\nA GET request may be used to verify that the correct host will be selected.\nThis request must authenticate as a valid user with permission to edit the\njob template. For example:\n\n curl http://user:password@server/api/v2/job_templates/N/callback/\n\nThe response will include the host config key as well as the host name(s)\nthat would match the request:\n\n {\n \"host_config_key\": \"HOST_CONFIG_KEY\",\n \"matching_hosts\": [\"hostname\"]\n }", + "operationId": "api_job_templates_callback_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Empty" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Empty" + } + } + }, + "summary": "Configure a host to POST to this resource, passing the `host_config_key`", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/copy/": { + "get": { + "description": "", + "operationId": "api_job_templates_copy_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Copy" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "No Description for get on /api/{version}/job_templates/{id}/copy/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_job_templates_copy_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Copy" + } + } + ], + 
"responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Copy" + } + } + }, + "summary": "No Description for post on /api/{version}/job_templates/{id}/copy/", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/credentials/": { + "get": { + "description": "credentials associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "cloud": false, + "created": "2018-02-01T08:00:00.000000Z", + "credential_type": 1, + "description": "", + "id": 2, + "inputs": { + "password": "$encrypted$", + "username": "bob" + }, + "kind": "ssh", + "kubernetes": false, + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "First Cred", + "organization": null, + "related": { + "access_list": "/api/v2/credentials/2/access_list/", + "activity_stream": "/api/v2/credentials/2/activity_stream/", + "copy": "/api/v2/credentials/2/copy/", + "credential_type": "/api/v2/credential_types/1/", + "input_sources": "/api/v2/credentials/2/input_sources/", + "object_roles": "/api/v2/credentials/2/object_roles/", + "owner_teams": "/api/v2/credentials/2/owner_teams/", + "owner_users": "/api/v2/credentials/2/owner_users/" + }, + "summary_fields": { + "credential_type": { + "description": "", + "id": 1, + "name": "Machine" + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the credential", + "id": 31, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the credential", + "id": 33, + "name": "Read" + }, + "use_role": { + "description": "Can use the credential in a job template", + "id": 32, + "name": "Use" + } + }, + "owners": [], + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true, + "use": true + } + }, + "type": "credential", + "url": "/api/v2/credentials/2/" + } + ] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Credential" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credentials associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field 
indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_credentials_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "credential_type": 1, + "inputs": { + "password": "secret", + "username": "bob" + }, + "name": "First Cred" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "cloud": false, + "created": "2018-02-01T08:00:00.000000Z", + "credential_type": 1, + "description": "", + "id": 2, + "inputs": { + "password": "$encrypted$", + "username": "bob" + }, + "kind": "ssh", + "kubernetes": false, + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "First Cred", + "organization": null, + "related": { + "access_list": "/api/v2/credentials/2/access_list/", + "activity_stream": "/api/v2/credentials/2/activity_stream/", + "copy": "/api/v2/credentials/2/copy/", + "credential_type": "/api/v2/credential_types/1/", + "input_sources": "/api/v2/credentials/2/input_sources/", + "object_roles": "/api/v2/credentials/2/object_roles/", + "owner_teams": "/api/v2/credentials/2/owner_teams/", + "owner_users": "/api/v2/credentials/2/owner_users/" + }, + "summary_fields": { + "credential_type": { + "description": "", + "id": 1, + "name": "Machine" + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the credential", + "id": 31, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the credential", + "id": 33, + "name": "Read" + }, + "use_role": { + "description": "Can use the credential in a job template", + "id": 32, + "name": "Use" + } + }, + "owners": [], + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true, + "use": true + } + }, + "type": "credential", + "url": "/api/v2/credentials/2/" + } + }, + "schema": { + "$ref": "#/definitions/Credential" + } + }, + "400": { + "examples": { + "application/json": { + "error": "Cannot assign multiple Machine credentials." 
+ } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/github/": { + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_job_templates_github_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/job_templates/{id}/github/", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/gitlab/": { + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_job_templates_gitlab_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/job_templates/{id}/gitlab/", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/instance_groups/": { + "get": { + "description": "instance groups associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_instance_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 5, + "next": null, + "previous": null, + "results": [ + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 3, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-0", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/3/access_list/", + "instances": "/api/v2/instance_groups/3/instances/", + "jobs": "/api/v2/instance_groups/3/jobs/", + "object_roles": "/api/v2/instance_groups/3/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 12, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 14, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 13, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/3/" + }, + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 1, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-3", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/1/access_list/", + "instances": 
"/api/v2/instance_groups/1/instances/", + "jobs": "/api/v2/instance_groups/1/jobs/", + "object_roles": "/api/v2/instance_groups/1/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 5, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 7, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 6, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/1/" + }, + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 4, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-1", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/4/access_list/", + "instances": "/api/v2/instance_groups/4/instances/", + "jobs": "/api/v2/instance_groups/4/jobs/", + "object_roles": "/api/v2/instance_groups/4/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 15, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 17, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 16, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/4/" + }, + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 2, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-2", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/2/access_list/", + "instances": "/api/v2/instance_groups/2/instances/", + "jobs": "/api/v2/instance_groups/2/jobs/", + "object_roles": "/api/v2/instance_groups/2/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 9, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 11, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 10, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/2/" + }, + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 5, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-4", + "percent_capacity_remaining": 0.0, + 
"pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/5/access_list/", + "instances": "/api/v2/instance_groups/5/instances/", + "jobs": "/api/v2/instance_groups/5/jobs/", + "object_roles": "/api/v2/instance_groups/5/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 18, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 20, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 19, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/5/" + } + ] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InstanceGroup" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "instance groups associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. 
(integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_instance_groups_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "associate": true, + "id": 5 + } + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/jobs/": { + "get": { + "description": "jobs associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of jobs\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job records. \n\n## Results\n\nEach job data structure includes the following fields:\n\n* `id`: Database ID for this job. (integer)\n* `type`: Data type for this job. (choice)\n* `url`: URL for this job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job was created. (datetime)\n* `modified`: Timestamp when this job was last modified. (datetime)\n* `name`: Name of this job. (string)\n* `description`: Optional description of this job. 
(string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n - `scan`: Scan\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this unified job. (id)\n* `job_template`: (id)\n* `passwords_needed_to_start`: (field)\n* `allow_simultaneous`: (boolean)\n* `artifacts`: (json)\n* `scm_revision`: The SCM Revision from the Project used for this job, if available (string)\n* `instance_group`: The Instance group the job was run under (id)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean)\n* `job_slice_number`: If part of a sliced job, the ID of the inventory slice operated on. If not part of sliced job, parameter is not used. (integer)\n* `job_slice_count`: If run as part of sliced jobs, the total number of slices. If 1, job is not part of a sliced job. 
(integer)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `webhook_guid`: Unique identifier of the event that triggered this webhook (string)\n\n\n\n## Sorting\n\nTo specify that jobs are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_jobs_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/JobList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/job_templates/{id}/labels/": { + "get": { + "description": "labels associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_labels_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Label" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "labels associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. 
(string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_labels_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Label" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Label" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/launch/": { + "get": { + "description": "launched and whether any passwords are required to launch the job_template.\nThe response will include the following fields:\n\n* `ask_variables_on_launch`: Flag indicating whether the job_template is\n configured to prompt for variables upon launch (boolean, read-only)\n* `ask_tags_on_launch`: Flag indicating whether the job_template is\n configured to prompt for tags upon launch (boolean, read-only)\n* `ask_skip_tags_on_launch`: Flag indicating whether the job_template is\n configured to prompt for skip_tags upon launch (boolean, read-only)\n* `ask_job_type_on_launch`: Flag indicating whether the job_template is\n configured to prompt for job_type upon launch (boolean, read-only)\n* `ask_limit_on_launch`: Flag indicating whether the job_template is\n configured to prompt for limit upon launch (boolean, read-only)\n* `ask_inventory_on_launch`: Flag indicating whether the job_template is\n configured to prompt for inventory upon launch (boolean, read-only)\n* `ask_credential_on_launch`: Flag indicating whether the job_template is\n configured to prompt for credential upon launch (boolean, read-only)\n* `can_start_without_user_input`: Flag indicating if the job_template can be\n launched without user-input (boolean, read-only)\n* `passwords_needed_to_start`: Password names required to launch the\n job_template (array, read-only)\n* `variables_needed_to_start`: Required variable names required to launch the\n job_template (array, read-only)\n* `survey_enabled`: Flag indicating whether the job_template has an enabled\n survey (boolean, read-only)\n* `inventory_needed_to_start`: Flag indicating the presence of an inventory\n associated with the job template. If not then one should be supplied when\n launching the job (boolean, read-only)\n\nMake a POST request to this resource to launch the job_template. If any\npasswords, inventory, or extra variables (extra_vars) are required, they must\nbe passed via POST data, with extra_vars given as a YAML or JSON string and\nescaped parentheses. 
If the `inventory_needed_to_start` is `True` then the\n`inventory` is required.\n\nIf successful, the response status code will be 201. If any required passwords\nare not provided, a 400 status code will be returned. If the job cannot be\nlaunched, a 405 status code will be returned. If the provided credential or\ninventory are not allowed to be used by the user, then a 403 status code will\nbe returned.", + "operationId": "api_job_templates_launch_read", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "ask_credential_on_launch": true, + "ask_diff_mode_on_launch": false, + "ask_execution_environment_on_launch": false, + "ask_forks_on_launch": false, + "ask_instance_groups_on_launch": false, + "ask_inventory_on_launch": false, + "ask_job_slice_count_on_launch": false, + "ask_job_type_on_launch": false, + "ask_labels_on_launch": false, + "ask_limit_on_launch": false, + "ask_scm_branch_on_launch": false, + "ask_skip_tags_on_launch": false, + "ask_tags_on_launch": false, + "ask_timeout_on_launch": false, + "ask_variables_on_launch": false, + "ask_verbosity_on_launch": false, + "can_start_without_user_input": false, + "credential_needed_to_start": false, + "defaults": { + "credentials": [ + { + "credential_type": 1, + "id": 1, + "name": "machine-cred", + "passwords_needed": [ + "ssh_password" + ] + } + ], + "diff_mode": false, + "execution_environment": {}, + "extra_vars": "", + "forks": 0, + "instance_groups": [], + "inventory": { + "id": 1, + "name": "test-inv" + }, + "job_slice_count": 1, + "job_tags": "", + "job_type": "run", + "limit": "", + "scm_branch": "", + "skip_tags": "", + "timeout": 0, + "verbosity": 0 + }, + "inventory_needed_to_start": false, + "job_template_data": { + "description": "", + "id": 1, + "name": "test-job_template" + }, + "passwords_needed_to_start": [ + "ssh_password" + ], + "survey_enabled": false, + "variables_needed_to_start": [] + } + }, + "schema": { + "$ref": "#/definitions/JobLaunch" + } + } + }, + "summary": "Make a GET request to this resource to determine if the job_template can be", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "launched and whether any passwords are required to launch the job_template.\nThe response will include the following fields:\n\n* `ask_variables_on_launch`: Flag indicating whether the job_template is\n configured to prompt for variables upon launch (boolean, read-only)\n* `ask_tags_on_launch`: Flag indicating whether the job_template is\n configured to prompt for tags upon launch (boolean, read-only)\n* `ask_skip_tags_on_launch`: Flag indicating whether the job_template is\n configured to prompt for skip_tags upon launch (boolean, read-only)\n* `ask_job_type_on_launch`: Flag indicating whether the job_template is\n configured to prompt for job_type upon launch (boolean, read-only)\n* `ask_limit_on_launch`: Flag indicating whether the job_template is\n configured to prompt for limit upon launch (boolean, read-only)\n* `ask_inventory_on_launch`: Flag indicating whether the job_template is\n configured to prompt for inventory upon launch (boolean, read-only)\n* `ask_credential_on_launch`: Flag indicating whether the job_template is\n configured to prompt for credential upon launch (boolean, read-only)\n* 
`can_start_without_user_input`: Flag indicating if the job_template can be\n launched without user-input (boolean, read-only)\n* `passwords_needed_to_start`: Password names required to launch the\n job_template (array, read-only)\n* `variables_needed_to_start`: Required variable names required to launch the\n job_template (array, read-only)\n* `survey_enabled`: Flag indicating whether the job_template has an enabled\n survey (boolean, read-only)\n* `inventory_needed_to_start`: Flag indicating the presence of an inventory\n associated with the job template. If not then one should be supplied when\n launching the job (boolean, read-only)\n\nMake a POST request to this resource to launch the job_template. If any\npasswords, inventory, or extra variables (extra_vars) are required, they must\nbe passed via POST data, with extra_vars given as a YAML or JSON string and\nescaped parentheses. If the `inventory_needed_to_start` is `True` then the\n`inventory` is required.\n\nIf successful, the response status code will be 201. If any required passwords\nare not provided, a 400 status code will be returned. If the job cannot be\nlaunched, a 405 status code will be returned. If the provided credential or\ninventory are not allowed to be used by the user, then a 403 status code will\nbe returned.", + "operationId": "api_job_templates_launch_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "extra_vars": { + "survey_var": 7 + } + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "ignored_fields": { + "extra_vars": { + "survey_var": 7 + } + }, + "job": 968 + } + }, + "schema": { + "$ref": "#/definitions/JobLaunch" + } + }, + "400": { + "examples": { + "application/json": { + "variables_needed_to_start": [ + "'secret_value' value missing" + ] + } + } + } + }, + "summary": "Make a GET request to this resource to determine if the job_template can be", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/notification_templates_error/": { + "get": { + "description": "notification templates associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. 
(string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_notification_templates_error_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_notification_templates_error_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/notification_templates_started/": { + "get": { + "description": "notification templates associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. 
(datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_notification_templates_started_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. 
(choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_notification_templates_started_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/notification_templates_success/": { + "get": { + "description": "notification templates associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_notification_templates_success_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. 
\n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_notification_templates_success_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/object_roles/": { + "get": { + "description": "roles associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records. \n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. 
The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_object_roles_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Role" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/job_templates/{id}/schedules/": { + "get": { + "description": "schedules associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of schedules\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more schedule records. \n\n## Results\n\nEach schedule data structure includes the following fields:\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. 
(datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. (string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time; afterwards, the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n## Sorting\n\nTo specify that schedules are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_schedules_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Schedule" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "schedules associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of schedules\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more schedule records. \n\n## Results\n\nEach schedule data structure includes the following fields:\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. 
(string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time; afterwards, the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n## Sorting\n\nTo specify that schedules are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_schedules_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "extra_data": "{\"var1\": \"$encrypted$\"}", + "name": "test sch", + "rrule": "DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "diff_mode": null, + "dtend": "2015-11-17T05:00:00Z", + "dtstart": "2015-11-17T05:00:00Z", + "enabled": true, + "execution_environment": null, + "extra_data": {}, + "forks": null, + "id": 1, + "inventory": null, + "job_slice_count": null, + "job_tags": null, + "job_type": null, + "limit": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "test sch", + "next_run": null, + "related": { + "credentials": "/api/v2/schedules/1/credentials/", + "instance_groups": "/api/v2/schedules/1/instance_groups/", + "inventory": "/api/v2/inventories/1/", + "labels": "/api/v2/schedules/1/labels/", + "project": "/api/v2/projects/1/", + "unified_job_template": "/api/v2/job_templates/2/", + "unified_jobs": "/api/v2/schedules/1/jobs/" + }, + "rrule": "DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1", + "scm_branch": null, + "skip_tags": null, + "summary_fields": { + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "unified_job_template": { + "description": "", + "id": 2, + "name": "test-jt", + "unified_job_type": "job" + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "timeout": null, + "timezone": "UTC", + "type": "schedule", + "unified_job_template": 2, + "until": "", + "url": "/api/v2/schedules/1/", + "verbosity": null + } + }, + "schema": { + "$ref": "#/definitions/Schedule" + } + }, + "400": { + "examples": { + "application/json": { + "rrule": [ + "Multiple DTSTART is not supported.", + "INTERVAL required in rrule: RULE:FREQ=SECONDLY", + "RRULE may not contain both COUNT and UNTIL: RULE:FREQ=MINUTELY;INTERVAL=10;COUNT=5;UNTIL=20220101", + "rrule parsing failed validation: 'NoneType' object has no attribute 'group'" + ] + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/slice_workflow_jobs/": { + "get": { + "description": "workflow jobs associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow jobs\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. 
The\n`results` list contains zero or more workflow job records. \n\n## Results\n\nEach workflow job data structure includes the following fields:\n\n* `id`: Database ID for this workflow job. (integer)\n* `type`: Data type for this workflow job. (choice)\n* `url`: URL for this workflow job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job was created. (datetime)\n* `modified`: Timestamp when this workflow job was last modified. (datetime)\n* `name`: Name of this workflow job. (string)\n* `description`: Optional description of this workflow job. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `workflow_job_template`: (id)\n* `extra_vars`: (json)\n* `allow_simultaneous`: (boolean)\n* `job_template`: If automatically created for a sliced job run, the job template the workflow job was created from. (id)\n* `is_sliced_job`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `webhook_guid`: Unique identifier of the event that triggered this webhook (string)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n## Sorting\n\nTo specify that workflow jobs are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_slice_workflow_jobs_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "workflow jobs associated with the selected\njob template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow jobs\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job records. \n\n## Results\n\nEach workflow job data structure includes the following fields:\n\n* `id`: Database ID for this workflow job. (integer)\n* `type`: Data type for this workflow job. (choice)\n* `url`: URL for this workflow job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job was created. (datetime)\n* `modified`: Timestamp when this workflow job was last modified. (datetime)\n* `name`: Name of this workflow job. (string)\n* `description`: Optional description of this workflow job. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. 
(datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `workflow_job_template`: (id)\n* `extra_vars`: (json)\n* `allow_simultaneous`: (boolean)\n* `job_template`: If automatically created for a sliced job run, the job template the workflow job was created from. (id)\n* `is_sliced_job`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `webhook_guid`: Unique identifier of the event that triggered this webhook (string)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n## Sorting\n\nTo specify that workflow jobs are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_job_templates_slice_workflow_jobs_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowJobList" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobList" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/survey_spec/": { + "delete": { + "description": "\n {\n \"name\": \"Simple Survey\",\n \"description\": \"Description of the simple survey\",\n \"spec\": [\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"example question\",\n \t\"question_description\": \"What is your favorite color?\",\n \t\"variable\": \"favorite_color\",\n \t\"required\": false,\n \t\"default\": \"blue\"\n }\n ]\n }\n\n`name` and `description` are required elements at the beginning of the survey specification. 
`spec` must be a\nlist of survey items.\n\nWithin each survey item `type` must be one of:\n\n* text: For survey questions expecting a textual answer\n* password: For survey questions expecting a password or other sensitive information\n* integer: For survey questions expecting a whole number answer\n* float: For survey questions expecting a decimal number\n* multiplechoice: For survey questions where one option from a list is required\n* multiselect: For survey questions where multiple items from a presented list can be selected\n\nEach item must contain a `question_name` and `question_description` field that describes the survey question itself.\nThe `variable` element of each survey item represents the key that will be given to the playbook when the job template\nis launched. It will contain the value as a result of the survey.\n\nHere is a more comprehensive example showing the various question types and their acceptable parameters:\n\n {\n \"name\": \"Simple\",\n \"description\": \"Description\",\n \"spec\": [\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"cantbeshort\",\n \t\"question_description\": \"What is a long answer\",\n \t\"variable\": \"long_answer\",\n \t\"choices\": \"\",\n \t\"min\": 5,\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"Leeloo Minai Lekarariba-Laminai-Tchai Ekbat De Sebat\"\n },\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"cantbelong\",\n \t\"question_description\": \"What is a short answer\",\n \t\"variable\": \"short_answer\",\n \t\"choices\": \"\",\n \t\"min\": \"\",\n \t\"max\": 7,\n \t\"required\": false,\n \t\"default\": \"leeloo\"\n },\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"reqd\",\n \t\"question_description\": \"I should be required\",\n \t\"variable\": \"reqd_answer\",\n \t\"choices\": \"\",\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": true,\n \t\"default\": \"NOT OPTIONAL\"\n },\n {\n \t\"type\": \"multiplechoice\",\n \t\"question_name\": \"achoice\",\n \t\"question_description\": \"Need one of these\",\n \t\"variable\": \"single_choice\",\n \t\"choices\": [\"one\", \"two\"],\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"one\"\n },\n {\n \t\"type\": \"multiselect\",\n \t\"question_name\": \"mchoice\",\n \t\"question_description\": \"Can have multiples of these\",\n \t\"variable\": \"multi_choice\",\n \t\"choices\": [\"one\", \"two\", \"three\"],\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"one\\nthree\"\n },\n {\n \"type\": \"integer\",\n \"question_name\": \"integerchoice\",\n \"question_description\": \"I need an int here\",\n \"variable\": \"int_answer\",\n \"choices\": \"\",\n \"min\": 1,\n \"max\": 5,\n \"required\": false,\n \"default\": \"\"\n },\n {\n \"type\": \"float\",\n \"question_name\": \"float\",\n \"question_description\": \"I need a float here\",\n \"variable\": \"float_answer\",\n \"choices\": \"\",\n \"min\": 2,\n \"max\": 5,\n \"required\": false,\n \"default\": \"\"\n }\n ]\n }", + "operationId": "api_job_templates_survey_spec_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Here is an example survey specification", + "tags": [ + "api" + ] + }, + "get": { + "description": "\n {\n \"name\": \"Simple Survey\",\n \"description\": \"Description of the simple survey\",\n \"spec\": [\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"example question\",\n \t\"question_description\": \"What is your favorite color?\",\n \t\"variable\": \"favorite_color\",\n 
\t\"required\": false,\n \t\"default\": \"blue\"\n }\n ]\n }\n\n`name` and `description` are required elements at the beginning of the survey specification. `spec` must be a\nlist of survey items.\n\nWithin each survey item `type` must be one of:\n\n* text: For survey questions expecting a textual answer\n* password: For survey questions expecting a password or other sensitive information\n* integer: For survey questions expecting a whole number answer\n* float: For survey questions expecting a decimal number\n* multiplechoice: For survey questions where one option from a list is required\n* multiselect: For survey questions where multiple items from a presented list can be selected\n\nEach item must contain a `question_name` and `question_description` field that describes the survey question itself.\nThe `variable` elements of each survey items represents the key that will be given to the playbook when the job template\nis launched. It will contain the value as a result of the survey.\n\nHere is a more comprehensive example showing the various question types and their acceptable parameters:\n\n {\n \"name\": \"Simple\",\n \"description\": \"Description\",\n \"spec\": [\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"cantbeshort\",\n \t\"question_description\": \"What is a long answer\",\n \t\"variable\": \"long_answer\",\n \t\"choices\": \"\",\n \t\"min\": 5,\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"Leeloo Minai Lekarariba-Laminai-Tchai Ekbat De Sebat\"\n },\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"cantbelong\",\n \t\"question_description\": \"What is a short answer\",\n \t\"variable\": \"short_answer\",\n \t\"choices\": \"\",\n \t\"min\": \"\",\n \t\"max\": 7,\n \t\"required\": false,\n \t\"default\": \"leeloo\"\n },\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"reqd\",\n \t\"question_description\": \"I should be required\",\n \t\"variable\": \"reqd_answer\",\n \t\"choices\": \"\",\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": true,\n \t\"default\": \"NOT OPTIONAL\"\n },\n {\n \t\"type\": \"multiplechoice\",\n \t\"question_name\": \"achoice\",\n \t\"question_description\": \"Need one of these\",\n \t\"variable\": \"single_choice\",\n \t\"choices\": [\"one\", \"two\"],\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"one\"\n },\n {\n \t\"type\": \"multiselect\",\n \t\"question_name\": \"mchoice\",\n \t\"question_description\": \"Can have multiples of these\",\n \t\"variable\": \"multi_choice\",\n \t\"choices\": [\"one\", \"two\", \"three\"],\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"one\\nthree\"\n },\n {\n \"type\": \"integer\",\n \"question_name\": \"integerchoice\",\n \"question_description\": \"I need an int here\",\n \"variable\": \"int_answer\",\n \"choices\": \"\",\n \"min\": 1,\n \"max\": 5,\n \"required\": false,\n \"default\": \"\"\n },\n {\n \"type\": \"float\",\n \"question_name\": \"float\",\n \"question_description\": \"I need a float here\",\n \"variable\": \"float_answer\",\n \"choices\": \"\",\n \"min\": 2,\n \"max\": 5,\n \"required\": false,\n \"default\": \"\"\n }\n ]\n }", + "operationId": "api_job_templates_survey_spec_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number 
of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": {} + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Empty" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Here is an example survey specification", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "\n {\n \"name\": \"Simple Survey\",\n \"description\": \"Description of the simple survey\",\n \"spec\": [\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"example question\",\n \t\"question_description\": \"What is your favorite color?\",\n \t\"variable\": \"favorite_color\",\n \t\"required\": false,\n \t\"default\": \"blue\"\n }\n ]\n }\n\n`name` and `description` are required elements at the beginning of the survey specification. `spec` must be a\nlist of survey items.\n\nWithin each survey item `type` must be one of:\n\n* text: For survey questions expecting a textual answer\n* password: For survey questions expecting a password or other sensitive information\n* integer: For survey questions expecting a whole number answer\n* float: For survey questions expecting a decimal number\n* multiplechoice: For survey questions where one option from a list is required\n* multiselect: For survey questions where multiple items from a presented list can be selected\n\nEach item must contain a `question_name` and `question_description` field that describes the survey question itself.\nThe `variable` element of each survey item represents the key that will be given to the playbook when the job template\nis launched. 
It will contain the value as a result of the survey.\n\nHere is a more comprehensive example showing the various question types and their acceptable parameters:\n\n {\n \"name\": \"Simple\",\n \"description\": \"Description\",\n \"spec\": [\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"cantbeshort\",\n \t\"question_description\": \"What is a long answer\",\n \t\"variable\": \"long_answer\",\n \t\"choices\": \"\",\n \t\"min\": 5,\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"Leeloo Minai Lekarariba-Laminai-Tchai Ekbat De Sebat\"\n },\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"cantbelong\",\n \t\"question_description\": \"What is a short answer\",\n \t\"variable\": \"short_answer\",\n \t\"choices\": \"\",\n \t\"min\": \"\",\n \t\"max\": 7,\n \t\"required\": false,\n \t\"default\": \"leeloo\"\n },\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"reqd\",\n \t\"question_description\": \"I should be required\",\n \t\"variable\": \"reqd_answer\",\n \t\"choices\": \"\",\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": true,\n \t\"default\": \"NOT OPTIONAL\"\n },\n {\n \t\"type\": \"multiplechoice\",\n \t\"question_name\": \"achoice\",\n \t\"question_description\": \"Need one of these\",\n \t\"variable\": \"single_choice\",\n \t\"choices\": [\"one\", \"two\"],\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"one\"\n },\n {\n \t\"type\": \"multiselect\",\n \t\"question_name\": \"mchoice\",\n \t\"question_description\": \"Can have multiples of these\",\n \t\"variable\": \"multi_choice\",\n \t\"choices\": [\"one\", \"two\", \"three\"],\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"one\\nthree\"\n },\n {\n \"type\": \"integer\",\n \"question_name\": \"integerchoice\",\n \"question_description\": \"I need an int here\",\n \"variable\": \"int_answer\",\n \"choices\": \"\",\n \"min\": 1,\n \"max\": 5,\n \"required\": false,\n \"default\": \"\"\n },\n {\n \"type\": \"float\",\n \"question_name\": \"float\",\n \"question_description\": \"I need a float here\",\n \"variable\": \"float_answer\",\n \"choices\": \"\",\n \"min\": 2,\n \"max\": 5,\n \"required\": false,\n \"default\": \"\"\n }\n ]\n }", + "operationId": "api_job_templates_survey_spec_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "description": "A survey", + "name": "my survey", + "spec": [ + { + "index": 0, + "max": 3, + "question_name": "What is your password?", + "required": true, + "type": "password", + "variable": "secret_value" + } + ] + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Empty" + } + }, + "400": { + "examples": { + "application/json": { + "error": "Default value OrderedDict([('some-invalid', 'dict')]) in survey question 0 expected to be string." + } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." 
+ } + } + } + }, + "summary": "Here is an example survey specification", + "tags": [ + "api" + ] + } + }, + "/api/v2/job_templates/{id}/webhook_key/": { + "get": { + "description": "", + "operationId": "api_job_templates_webhook_key_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "webhook_key": "" + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Empty" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "No Description for get on /api/{version}/job_templates/{id}/webhook_key/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_job_templates_webhook_key_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Empty" + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "webhook_key": "WRrjmah6hQg26CqeQwlCBdYtKIsveMrPDSZnR7quCesNgNIedO" + } + }, + "schema": { + "$ref": "#/definitions/Empty" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "No Description for post on /api/{version}/job_templates/{id}/webhook_key/", + "tags": [ + "api" + ] + } + }, + "/api/v2/jobs/": { + "get": { + "description": "jobs.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of jobs\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job records. \n\n## Results\n\nEach job data structure includes the following fields:\n\n* `id`: Database ID for this job. (integer)\n* `type`: Data type for this job. (choice)\n* `url`: URL for this job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job was created. (datetime)\n* `modified`: Timestamp when this job was last modified. (datetime)\n* `name`: Name of this job. (string)\n* `description`: Optional description of this job. 
(string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n - `scan`: Scan\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this unified job. (id)\n* `job_template`: (id)\n* `passwords_needed_to_start`: (field)\n* `allow_simultaneous`: (boolean)\n* `artifacts`: (json)\n* `scm_revision`: The SCM Revision from the Project used for this job, if available (string)\n* `instance_group`: The Instance group the job was run under (id)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean)\n* `job_slice_number`: If part of a sliced job, the ID of the inventory slice operated on. If not part of sliced job, parameter is not used. (integer)\n* `job_slice_count`: If run as part of sliced jobs, the total number of slices. If 1, job is not part of a sliced job. 
(integer)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `webhook_guid`: Unique identifier of the event that triggered this webhook (string)\n\n\n\n## Sorting\n\nTo specify that jobs are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_jobs_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/JobList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "Filtering on extra_vars is not allowed." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/jobs/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this job. (integer)\n* `type`: Data type for this job. (choice)\n* `url`: URL for this job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job was created. (datetime)\n* `modified`: Timestamp when this job was last modified. (datetime)\n* `name`: Name of this job. (string)\n* `description`: Optional description of this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n - `scan`: Scan\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. 
Only allowed if project allow_override field is set to true. (string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this unified job. (id)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `result_traceback`: (string)\n* `event_processing_finished`: Indicates whether all of the events generated by this unified job have been saved to the database. (boolean)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_template`: (id)\n* `passwords_needed_to_start`: (field)\n* `allow_simultaneous`: (boolean)\n* `artifacts`: (json)\n* `scm_revision`: The SCM Revision from the Project used for this job, if available (string)\n* `instance_group`: The Instance group the job was run under (id)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean)\n* `job_slice_number`: If part of a sliced job, the ID of the inventory slice operated on. If not part of sliced job, parameter is not used. (integer)\n* `job_slice_count`: If run as part of sliced jobs, the total number of slices. If 1, job is not part of a sliced job. (integer)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `webhook_guid`: Unique identifier of the event that triggered this webhook (string)\n* `host_status_counts`: Playbook stats from the Ansible playbook_on_stats event. (json)\n* `playbook_counts`: A count of all plays and tasks for the job run. 
(field)\n* `custom_virtualenv`: (string)\n\n\n\n\n\n# Delete a Job:\n\nMake a DELETE request to this resource to delete this job.", + "operationId": "api_jobs_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "403": { + "examples": { + "application/json": { + "detail": "Cannot delete running job resource." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single job", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this job. (integer)\n* `type`: Data type for this job. (choice)\n* `url`: URL for this job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job was created. (datetime)\n* `modified`: Timestamp when this job was last modified. (datetime)\n* `name`: Name of this job. (string)\n* `description`: Optional description of this job. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n - `scan`: Scan\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this unified job. (id)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `result_traceback`: (string)\n* `event_processing_finished`: Indicates whether all of the events generated by this unified job have been saved to the database. 
(boolean)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `job_template`: (id)\n* `passwords_needed_to_start`: (field)\n* `allow_simultaneous`: (boolean)\n* `artifacts`: (json)\n* `scm_revision`: The SCM Revision from the Project used for this job, if available (string)\n* `instance_group`: The Instance group the job was run under (id)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean)\n* `job_slice_number`: If part of a sliced job, the ID of the inventory slice operated on. If not part of sliced job, parameter is not used. (integer)\n* `job_slice_count`: If run as part of sliced jobs, the total number of slices. If 1, job is not part of a sliced job. (integer)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `webhook_guid`: Unique identifier of the event that triggered this webhook (string)\n* `host_status_counts`: Playbook stats from the Ansible playbook_on_stats event. (json)\n* `playbook_counts`: A count of all plays and tasks for the job run. (field)\n* `custom_virtualenv`: (string)\n\n\n\n\n\n# Delete a Job:\n\nMake a DELETE request to this resource to delete this job.", + "operationId": "api_jobs_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/JobDetail" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single job", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/jobs/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\njob.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. 
(string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_jobs_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/jobs/{id}/cancel/": { + "get": { + "description": "The response will include the following field:\n\n* `can_cancel`: Indicates whether this job can be canceled (boolean, read-only)\n\n\n\n# Cancel a Job\nMake a POST request to this resource to cancel a pending or running job. 
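For example, with curl (the hostname, credentials, and job ID below are illustrative):\n\n curl -u admin:password -X POST https://controller.example.com/api/v2/jobs/123/cancel/\n\n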
The\nresponse status code will be 202 if successful, or 405 if the job cannot be\ncanceled.", + "operationId": "api_jobs_cancel_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/JobCancel" + } + } + }, + "summary": "Make a GET request to this resource to determine if the job can be canceled.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "The response will include the following field:\n\n* `can_cancel`: Indicates whether this job can be canceled (boolean, read-only)\n\n\n\n# Cancel a Job\nMake a POST request to this resource to cancel a pending or running job. The\nresponse status code will be 202 if successful, or 405 if the job cannot be\ncanceled.", + "operationId": "api_jobs_cancel_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/JobCancel" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/JobCancel" + } + } + }, + "summary": "Make a POST request to this resource to cancel a pending or running job.", + "tags": [ + "api" + ] + } + }, + "/api/v2/jobs/{id}/create_schedule/": { + "get": { + "description": "the job template that launched this job, and uses the same\nparameters that the job was launched with. These parameters include all\n\"prompted\" resources such as `extra_vars`, `inventory`, `limit`, etc.\n\nJobs that were launched with user-provided passwords cannot have a schedule\ncreated from them.\n\nMake a GET request for information about what those prompts are and\nwhether or not a schedule can be created.", + "operationId": "api_jobs_create_schedule_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/JobCreateSchedule" + } + } + }, + "summary": "Make a POST request to this endpoint to create a schedule that launches", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "the job template that launched this job, and uses the same\nparameters that the job was launched with.
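The check-then-cancel flow, sketched with the same hypothetical host, token, and job ID:

```
# Ask whether job 42 can be canceled; the body contains "can_cancel".
curl -s -H "Authorization: Bearer $TOKEN" \
  https://controller.example.com/api/v2/jobs/42/cancel/

# Request cancellation; expect HTTP 202 on success, 405 otherwise.
curl -s -o /dev/null -w "%{http_code}\n" -X POST \
  -H "Authorization: Bearer $TOKEN" \
  https://controller.example.com/api/v2/jobs/42/cancel/
```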
These parameters include all\n\"prompted\" resources such as `extra_vars`, `inventory`, `limit`, etc.\n\nJobs that were launched with user-provided passwords cannot have a schedule\ncreated from them.\n\nMake a GET request for information about what those prompts are and\nwhether or not a schedule can be created.", + "operationId": "api_jobs_create_schedule_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/JobCreateSchedule" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/JobCreateSchedule" + } + } + }, + "summary": "Make a POST request to this endpoint to create a schedule that launches", + "tags": [ + "api" + ] + } + }, + "/api/v2/jobs/{id}/credentials/": { + "get": { + "description": "credentials associated with the selected\njob.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
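A sketch of the prompt-inspection step followed by the schedule creation, with the same hypothetical values. Sending an empty JSON body on the POST is an assumption here, on the basis that the job's original launch parameters are reused; the GET response describes whether a schedule can be created and what was prompted:

```
# Inspect whether a schedule can be created from job 42.
curl -s -H "Authorization: Bearer $TOKEN" \
  https://controller.example.com/api/v2/jobs/42/create_schedule/

# Create the schedule; the empty body assumes the job's own
# launch-time parameters are reused as-is.
curl -s -X POST -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" -d '{}' \
  https://controller.example.com/api/v2/jobs/42/create_schedule/
```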
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_jobs_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Credential" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/jobs/{id}/job_events/": { + "get": { + "description": "job events associated with the selected\njob.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of job events\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job event records. \n\n## Results\n\nEach job event data structure includes the following fields:\n\n* `id`: Database ID for this job event. (integer)\n* `type`: Data type for this job event. (choice)\n* `url`: URL for this job event. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job event was created. (datetime)\n* `modified`: Timestamp when this job event was last modified. 
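Listing the credentials attached to a job, described above, follows the same pattern as the other sub-resource lists; hypothetical host, token, and job ID as before:

```
# Credentials used by job 42, sorted by name.
curl -s -G -H "Authorization: Bearer $TOKEN" \
  --data-urlencode "order_by=name" \
  https://controller.example.com/api/v2/jobs/42/credentials/
```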
(datetime)\n* `job`: (id)\n* `event`: (choice)\n - `runner_on_failed`: Host Failed\n - `runner_on_start`: Host Started\n - `runner_on_ok`: Host OK\n - `runner_on_error`: Host Failure\n - `runner_on_skipped`: Host Skipped\n - `runner_on_unreachable`: Host Unreachable\n - `runner_on_no_hosts`: No Hosts Remaining\n - `runner_on_async_poll`: Host Polling\n - `runner_on_async_ok`: Host Async OK\n - `runner_on_async_failed`: Host Async Failure\n - `runner_item_on_ok`: Item OK\n - `runner_item_on_failed`: Item Failed\n - `runner_item_on_skipped`: Item Skipped\n - `runner_retry`: Host Retry\n - `runner_on_file_diff`: File Difference\n - `playbook_on_start`: Playbook Started\n - `playbook_on_notify`: Running Handlers\n - `playbook_on_include`: Including File\n - `playbook_on_no_hosts_matched`: No Hosts Matched\n - `playbook_on_no_hosts_remaining`: No Hosts Remaining\n - `playbook_on_task_start`: Task Started\n - `playbook_on_vars_prompt`: Variables Prompted\n - `playbook_on_setup`: Gathering Facts\n - `playbook_on_import_for_host`: internal: on Import for Host\n - `playbook_on_not_import_for_host`: internal: on Not Import for Host\n - `playbook_on_play_start`: Play Started\n - `playbook_on_stats`: Playbook Complete\n - `debug`: Debug\n - `verbose`: Verbose\n - `deprecated`: Deprecated\n - `warning`: Warning\n - `system_warning`: System Warning\n - `error`: Error\n* `counter`: (integer)\n* `event_display`: (string)\n* `event_data`: (json)\n* `event_level`: (integer)\n* `failed`: (boolean)\n* `changed`: (boolean)\n* `uuid`: (string)\n* `parent_uuid`: (string)\n* `host`: (id)\n* `host_name`: (string)\n* `playbook`: (string)\n* `play`: (string)\n* `task`: (string)\n* `role`: (string)\n* `stdout`: (string)\n* `start_line`: (integer)\n* `end_line`: (integer)\n* `verbosity`: (integer)\n\n\n\n## Sorting\n\nTo specify that job events are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
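Beyond `search`, job event lists can usually be narrowed by field value; the `event` and `failed` query filters below are assumptions based on the field names above:

```
# Only failed-host runner events for job 42.
curl -s -G -H "Authorization: Bearer $TOKEN" \
  --data-urlencode "event=runner_on_failed" \
  https://controller.example.com/api/v2/jobs/42/job_events/

# Or any event flagged as failed.
curl -s -G -H "Authorization: Bearer $TOKEN" \
  --data-urlencode "failed=true" \
  https://controller.example.com/api/v2/jobs/42/job_events/
```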
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_jobs_job_events_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "changed": false, + "counter": 0, + "created": "2018-02-01T08:00:00.000000Z", + "end_line": 0, + "event": "runner_on_start", + "event_data": {}, + "event_display": "Host Started", + "event_level": 3, + "failed": false, + "host": null, + "host_name": "", + "id": 1, + "job": 1, + "modified": "2018-02-01T08:00:00.000000Z", + "parent_uuid": "", + "play": "", + "playbook": "", + "related": { + "children": "/api/v2/job_events/1/children/", + "job": "/api/v2/jobs/1/" + }, + "role": "", + "start_line": 0, + "stdout": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "summary_fields": { + "job": { + "description": "", + "elapsed": 0.0, + "failed": false, + "id": 1, + "job_template_id": 2, + "job_template_name": "jt", + "name": "jt", + "status": "new", + "type": "job" + }, + "role": {} + }, + "task": "", + "type": "job_event", + "url": "/api/v2/job_events/1/", + "uuid": "abc123", + "verbosity": 0 + } + ] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/JobEvent" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ 
+ "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/jobs/{id}/job_events/children_summary/": { + "get": { + "description": "In order to collapse events and their children, the UI needs to know how\nmany children exist for a given event.\nThe UI also needs to know the order of the event (0 based index), which\nusually matches the counter, but not always.\nThis view returns a JSON object where the key is the event counter, and the value\nincludes the number of children (and grandchildren) events.\nOnly events with children are included in the output.\n\n## Example\n\ne.g. Demo Job Template job\ntuple(event counter, uuid, parent_uuid)\n\n```\n(1, '00000000-0000-0000-0000-000000000000', '')\n(2, '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000')\n(3, '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000')\n(4, '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000')\n(5, '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000')\n(6, '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000')\n(7, '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000')\n(8, '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000')\n(9, '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000')\n```\n\noutput\n\n```\n{\n\"1\": {\n \"rowNumber\": 0,\n \"numChildren\": 8\n},\n\"2\": {\n \"rowNumber\": 1,\n \"numChildren\": 6\n},\n\"3\": {\n \"rowNumber\": 2,\n \"numChildren\": 2\n},\n\"6\": {\n \"rowNumber\": 5,\n \"numChildren\": 2\n}\n}\n\"meta_event_nested_parent_uuid\": {}\n}\n```\n\ncounter 1 is event 0, and has 8 children\ncounter 2 is event 1, and has 6 children\netc.\n\nThe UI also needs to be able to collapse over \"meta\" events -- events that\nshow up due to verbosity or warnings from the system while the play is running.\nThese events have a 0 level event, with no parent uuid.\n\n```\nplaybook_on_start\nverbose\n playbook_on_play_start\n playbook_on_task_start\n runner_on_start <- level 3\nverbose <- jump to level 0\nverbose\n runner_on_ok <- jump back to level 3\n playbook_on_task_start\n runner_on_start\n runner_on_ok\nverbose\nverbose\n playbook_on_stats\n```\n\nThese verbose statements that fall in the middle of a series of children events\nare problematic for the UI.\nTo help, this view will attempt to place the events into the hierarchy, without\nthe event level jumps.\n\n```\nplaybook_on_start\n verbose\n playbook_on_play_start\n playbook_on_task_start\n runner_on_start <- A\n verbose <- this maps to the uuid of A\n verbose\n runner_on_ok\n playbook_on_task_start <- B\n runner_on_start\n runner_on_ok\n verbose <- this maps to the uuid of B\n verbose\n playbook_on_stats\n```\n\nThe output will include a JSON object where the key is the event counter,\nand the value is the assigned nested uuid.", + "operationId": "api_jobs_job_events_children_summary_list", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "children_summary": {}, + "event_processing_finished": true, + "is_tree": false, + "meta_event_nested_uuid": {} + } + } + } + }, + "summary": "Special view to facilitate processing job output in the UI.", + "tags": [ + "api" + ] + }, + "parameters": 
[ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/jobs/{id}/job_host_summaries/": { + "get": { + "description": "job host summaries associated with the selected\njob.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of job host summaries\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job host summary records. \n\n## Results\n\nEach job host summary data structure includes the following fields:\n\n* `id`: Database ID for this job host summary. (integer)\n* `type`: Data type for this job host summary. (choice)\n* `url`: URL for this job host summary. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job host summary was created. (datetime)\n* `modified`: Timestamp when this job host summary was last modified. (datetime)\n* `job`: (id)\n* `host`: (id)\n* `constructed_host`: Only for jobs run against constructed inventories, this links to the host inside the constructed inventory. (id)\n* `host_name`: (string)\n* `changed`: (integer)\n* `dark`: (integer)\n* `failures`: (integer)\n* `ok`: (integer)\n* `processed`: (integer)\n* `skipped`: (integer)\n* `failed`: (boolean)\n* `ignored`: (integer)\n* `rescued`: (integer)\n\n\n\n## Sorting\n\nTo specify that job host summaries are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_jobs_job_host_summaries_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/JobHostSummary" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/jobs/{id}/labels/": { + "get": { + "description": "labels associated with the selected\njob.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
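The job host summaries above give a quick per-host pass/fail roll-up; the `failed=true` filter is an assumption based on the boolean field listed there:

```
# Hosts that failed during job 42.
curl -s -G -H "Authorization: Bearer $TOKEN" \
  --data-urlencode "failed=true" \
  https://controller.example.com/api/v2/jobs/42/job_host_summaries/
```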
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_jobs_labels_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Label" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/jobs/{id}/notifications/": { + "get": { + "description": "notifications associated with the selected\njob.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notifications\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification records. \n\n## Results\n\nEach notification data structure includes the following fields:\n\n* `id`: Database ID for this notification. (integer)\n* `type`: Data type for this notification. (choice)\n* `url`: URL for this notification. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification was created. (datetime)\n* `modified`: Timestamp when this notification was last modified. 
(datetime)\n* `notification_template`: (id)\n* `error`: (string)\n* `status`: (choice)\n - `pending`: Pending\n - `successful`: Successful\n - `failed`: Failed\n* `notifications_sent`: (integer)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `recipients`: (string)\n* `subject`: (string)\n* `body`: Notification body (json)\n\n\n\n## Sorting\n\nTo specify that notifications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_jobs_notifications_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Notification" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/jobs/{id}/relaunch/": { + "get": { + "description": "", + "operationId": "api_jobs_relaunch_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/JobRelaunch" + } + } + }, + "summary": "Make a POST request to this resource to launch a job. If any passwords or variables are required then they should be passed in via POST data. 
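To audit notification delivery for a job, the `status` field above can be used as a filter (assumed, by analogy with the other list filters):

```
# Notifications that failed to send for job 42.
curl -s -G -H "Authorization: Bearer $TOKEN" \
  --data-urlencode "status=failed" \
  https://controller.example.com/api/v2/jobs/42/notifications/
```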
In order to determine what values are required in order to launch a job based on this job template you may make a GET request to this endpoint.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_jobs_relaunch_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/JobRelaunch" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/JobRelaunch" + } + } + }, + "summary": "Make a POST request to this resource to launch a job. If any passwords or variables are required then they should be passed in via POST data. In order to determine what values are required in order to launch a job based on this job template you may make a GET request to this endpoint.", + "tags": [ + "api" + ] + } + }, + "/api/v2/jobs/{id}/stdout/": { + "get": { + "description": "job.\n\n## Format\n\nUse the `format` query string parameter to specify the output format.\n\n* Browsable API: `?format=api`\n* HTML: `?format=html`\n* Plain Text: `?format=txt`\n* Plain Text with ANSI color codes: `?format=ansi`\n* JSON structure: `?format=json`\n* Downloaded Plain Text: `?format=txt_download`\n* Downloaded Plain Text with ANSI color codes: `?format=ansi_download`\n\n(_New in Ansible Tower 2.0.0_) When using the Browsable API, HTML and JSON\nformats, the `start_line` and `end_line` query string parameters can be used\nto specify a range of line numbers to retrieve.\n\nUse `dark=1` or `dark=0` as a query string parameter to force or disable a\ndark background.\n\nFiles over 1.0\u00a0MB (configurable)\nwill not display in the browser. Use the `txt_download` or `ansi_download`\nformats to download the file directly to view it.", + "operationId": "api_jobs_stdout_read", + "parameters": [], + "produces": [ + "text/plain", + "text/plain", + "application/json", + "text/plain", + "text/plain", + "text/html; charset=utf-8" + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/UnifiedJobStdout" + } + } + }, + "summary": "Make GET request to this resource to retrieve the stdout from running this", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/labels/": { + "get": { + "description": "labels.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. 
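Relaunching and fetching output, sketched with the same hypothetical values; the empty relaunch body assumes no passwords or variables are required (a GET on the relaunch endpoint reports what, if anything, must be supplied):

```
# Relaunch job 42 with the same parameters.
curl -s -X POST -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" -d '{}' \
  https://controller.example.com/api/v2/jobs/42/relaunch/

# Download the full stdout as plain text (works for output over the inline size limit).
curl -s -H "Authorization: Bearer $TOKEN" -o job42.txt \
  "https://controller.example.com/api/v2/jobs/42/stdout/?format=txt_download"
```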
(datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_labels_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Label" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "labels.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. 
(id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_labels_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Label" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Label" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/labels/{id}/": { + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n\n\n# Update a Label:\n\nMake a PUT or PATCH request to this resource to update this\nlabel. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this label. (string, required)\n* `organization`: Organization this label belongs to. (id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.", + "operationId": "api_labels_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Label" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single label", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. 
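Creating a label requires the two fields marked required above; organization ID 1 is hypothetical:

```
# Create a label named "nightly" in organization 1.
curl -s -X POST -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name": "nightly", "organization": 1}' \
  https://controller.example.com/api/v2/labels/
```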
(string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n\n\n# Update a Label:\n\nMake a PUT or PATCH request to this resource to update this\nlabel. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this label. (string, required)\n* `organization`: Organization this label belongs to. (id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.", + "operationId": "api_labels_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Label" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Label" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single label", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n\n\n# Update a Label:\n\nMake a PUT or PATCH request to this resource to update this\nlabel. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this label. (string, required)\n* `organization`: Organization this label belongs to. (id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.", + "operationId": "api_labels_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Label" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Label" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single label", + "tags": [ + "api" + ] + } + }, + "/api/v2/me/": { + "get": { + "description": "\nOne result should be returned containing the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. 
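A PATCH carries only the fields being changed, whereas a PUT must carry all of them; label ID 7 is hypothetical:

```
# Rename label 7 without touching its organization.
curl -s -X PATCH -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name": "weekly"}' \
  https://controller.example.com/api/v2/labels/7/
```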
(string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\nUse the primary URL for the user (/api/v2/users/N/) to modify the user.", + "operationId": "api_me_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/User" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to retrieve user information about the current user.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/mesh_visualizer/": { + "get": { + "description": "", + "operationId": "api_mesh_visualizer_list", + "parameters": [], + "responses": { + "200": { + "description": "" + } + }, + "summary": "Make a GET request to this resource to obtain a list all Receptor Nodes and their links.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/metrics/": { + "get": { + "description": "", + "operationId": "api_metrics_list", + "parameters": [], + "produces": [ + "text/plain", + "application/json" + ], + "responses": { + "200": { + "description": "" + } + }, + "summary": "Show Metrics Details", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/notification_templates/": { + "get": { + "description": "notification templates.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. 
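The current-user and metrics endpoints are both simple GETs; per the `produces` list, metrics can also be requested as plain text:

```
# Who am I?
curl -s -H "Authorization: Bearer $TOKEN" \
  https://controller.example.com/api/v2/me/

# Metrics in plain-text form.
curl -s -H "Authorization: Bearer $TOKEN" -H "Accept: text/plain" \
  https://controller.example.com/api/v2/metrics/
```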
(string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_notification_templates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_notification_templates_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/notification_templates/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n\n\n# Update a Notification Template:\n\nMake a PUT or PATCH request to this resource to update this\nnotification template. 
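Creating a notification template takes the required fields listed above plus a type-specific `notification_configuration`; the Slack channels/token shape shown here is an assumption for illustration:

```
# Create a hypothetical Slack notification template in organization 1.
curl -s -X POST -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name": "build-alerts",
       "organization": 1,
       "notification_type": "slack",
       "notification_configuration": {"channels": ["#builds"], "token": "xoxb-REDACTED"}}' \
  https://controller.example.com/api/v2/notification_templates/
```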
The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this notification template. (string, required)\n* `description`: Optional description of this notification template. (string, default=`\"\"`)\n* `organization`: (id, required)\n* `notification_type`: (choice, required)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json, default=`{}`)\n* `messages`: Optional custom messages for notification template. (json, default=`{'started': None, 'success': None, 'error': None, 'workflow_approval': None}`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Notification Template:\n\nMake a DELETE request to this resource to delete this notification template.", + "operationId": "api_notification_templates_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single notification template", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n\n\n# Update a Notification Template:\n\nMake a PUT or PATCH request to this resource to update this\nnotification template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this notification template. (string, required)\n* `description`: Optional description of this notification template. (string, default=`\"\"`)\n* `organization`: (id, required)\n* `notification_type`: (choice, required)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json, default=`{}`)\n* `messages`: Optional custom messages for notification template. 
(json, default=`{'started': None, 'success': None, 'error': None, 'workflow_approval': None}`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Notification Template:\n\nMake a DELETE request to this resource to delete this notification template.", + "operationId": "api_notification_templates_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single notification template", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n\n\n# Update a Notification Template:\n\nMake a PUT or PATCH request to this resource to update this\nnotification template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this notification template. (string, required)\n* `description`: Optional description of this notification template. (string, default=`\"\"`)\n* `organization`: (id, required)\n* `notification_type`: (choice, required)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json, default=`{}`)\n* `messages`: Optional custom messages for notification template. 
(json, default=`{'started': None, 'success': None, 'error': None, 'workflow_approval': None}`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Notification Template:\n\nMake a DELETE request to this resource to delete this notification template.", + "operationId": "api_notification_templates_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make PATCH request to this resource to update this notification template", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n\n\n# Update a Notification Template:\n\nMake a PUT or PATCH request to this resource to update this\nnotification template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this notification template. (string, required)\n* `description`: Optional description of this notification template. (string, default=`\"\"`)\n* `organization`: (id, required)\n* `notification_type`: (choice, required)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json, default=`{}`)\n* `messages`: Optional custom messages for notification template. 
(json, default=`{'started': None, 'success': None, 'error': None, 'workflow_approval': None}`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Notification Template:\n\nMake a DELETE request to this resource to delete this notification template.", + "operationId": "api_notification_templates_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make PUT request to this resource to update this notification template", + "tags": [ + "api" + ] + } + }, + "/api/v2/notification_templates/{id}/copy/": { + "get": { + "description": "", + "operationId": "api_notification_templates_copy_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Copy" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "No Description for get on /api/{version}/notification_templates/{id}/copy/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_notification_templates_copy_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Copy" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Copy" + } + } + }, + "summary": "No Description for post on /api/{version}/notification_templates/{id}/copy/", + "tags": [ + "api" + ] + } + }, + "/api/v2/notification_templates/{id}/notifications/": { + "get": { + "description": "notifications associated with the selected\nnotification template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notifications\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification records. \n\n## Results\n\nEach notification data structure includes the following fields:\n\n* `id`: Database ID for this notification. (integer)\n* `type`: Data type for this notification. (choice)\n* `url`: URL for this notification. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification was created. (datetime)\n* `modified`: Timestamp when this notification was last modified. (datetime)\n* `notification_template`: (id)\n* `error`: (string)\n* `status`: (choice)\n - `pending`: Pending\n - `successful`: Successful\n - `failed`: Failed\n* `notifications_sent`: (integer)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `recipients`: (string)\n* `subject`: (string)\n* `body`: Notification body (json)\n\n\n\n## Sorting\n\nTo specify that notifications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_notification_templates_notifications_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Notification" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/notification_templates/{id}/test/": { + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_notification_templates_test_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Empty" + } + } + ], + "responses": { + "201": { + 
"description": "", + "schema": { + "$ref": "#/definitions/Empty" + } + } + }, + "summary": "Test a Notification Template", + "tags": [ + "api" + ] + } + }, + "/api/v2/notifications/": { + "get": { + "description": "notifications.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notifications\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification records. \n\n## Results\n\nEach notification data structure includes the following fields:\n\n* `id`: Database ID for this notification. (integer)\n* `type`: Data type for this notification. (choice)\n* `url`: URL for this notification. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification was created. (datetime)\n* `modified`: Timestamp when this notification was last modified. (datetime)\n* `notification_template`: (id)\n* `error`: (string)\n* `status`: (choice)\n - `pending`: Pending\n - `successful`: Successful\n - `failed`: Failed\n* `notifications_sent`: (integer)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `recipients`: (string)\n* `subject`: (string)\n* `body`: Notification body (json)\n\n\n\n## Sorting\n\nTo specify that notifications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_notifications_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Notification" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/notifications/{id}/": { + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this notification. (integer)\n* `type`: Data type for this notification. (choice)\n* `url`: URL for this notification. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification was created. (datetime)\n* `modified`: Timestamp when this notification was last modified. 
(datetime)\n* `notification_template`: (id)\n* `error`: (string)\n* `status`: (choice)\n - `pending`: Pending\n - `successful`: Successful\n - `failed`: Failed\n* `notifications_sent`: (integer)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `recipients`: (string)\n* `subject`: (string)\n* `body`: Notification body (json)", + "operationId": "api_notifications_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Notification" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single notification", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/organizations/": { + "get": { + "description": "organizations.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of organizations\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more organization records. \n\n## Results\n\nEach organization data structure includes the following fields:\n\n* `id`: Database ID for this organization. (integer)\n* `type`: Data type for this organization. (choice)\n* `url`: URL for this organization. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this organization was created. (datetime)\n* `modified`: Timestamp when this organization was last modified. (datetime)\n* `name`: Name of this organization. (string)\n* `description`: Optional description of this organization. (string)\n* `max_hosts`: Maximum number of hosts allowed to be managed by this organization. (integer)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run by this organization. (id)\n\n\n\n## Sorting\n\nTo specify that organizations are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Organization" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "organizations.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of organizations\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more organization records. \n\n## Results\n\nEach organization data structure includes the following fields:\n\n* `id`: Database ID for this organization. (integer)\n* `type`: Data type for this organization. (choice)\n* `url`: URL for this organization. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this organization was created. (datetime)\n* `modified`: Timestamp when this organization was last modified. (datetime)\n* `name`: Name of this organization. (string)\n* `description`: Optional description of this organization. (string)\n* `max_hosts`: Maximum number of hosts allowed to be managed by this organization. (integer)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run by this organization. 
(id)\n\n\n\n## Sorting\n\nTo specify that organizations are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Organization" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Organization" + } + } + }, + "summary": "Make a POST request to this resource to create a new organization", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this organization. (integer)\n* `type`: Data type for this organization. (choice)\n* `url`: URL for this organization. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this organization was created. (datetime)\n* `modified`: Timestamp when this organization was last modified. (datetime)\n* `name`: Name of this organization. (string)\n* `description`: Optional description of this organization. (string)\n* `max_hosts`: Maximum number of hosts allowed to be managed by this organization. (integer)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run by this organization. (id)\n\n\n\n\n\n# Update an Organization:\n\nMake a PUT or PATCH request to this resource to update this\norganization. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this organization. (string, required)\n* `description`: Optional description of this organization. (string, default=`\"\"`)\n* `max_hosts`: Maximum number of hosts allowed to be managed by this organization. (integer, default=`0`)\n\n* `default_environment`: The default execution environment for jobs run by this organization. 
(id, default=``)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Organization:\n\nMake a DELETE request to this resource to delete this organization.", + "operationId": "api_organizations_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make DELETE request to this resource to delete this organization", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this organization. (integer)\n* `type`: Data type for this organization. (choice)\n* `url`: URL for this organization. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this organization was created. (datetime)\n* `modified`: Timestamp when this organization was last modified. (datetime)\n* `name`: Name of this organization. (string)\n* `description`: Optional description of this organization. (string)\n* `max_hosts`: Maximum number of hosts allowed to be managed by this organization. (integer)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run by this organization. (id)\n\n\n\n\n\n# Update an Organization:\n\nMake a PUT or PATCH request to this resource to update this\norganization. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this organization. (string, required)\n* `description`: Optional description of this organization. (string, default=`\"\"`)\n* `max_hosts`: Maximum number of hosts allowed to be managed by this organization. (integer, default=`0`)\n\n* `default_environment`: The default execution environment for jobs run by this organization. (id, default=``)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Organization:\n\nMake a DELETE request to this resource to delete this organization.", + "operationId": "api_organizations_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Organization" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single organization", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this organization. (integer)\n* `type`: Data type for this organization. (choice)\n* `url`: URL for this organization. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this organization was created. (datetime)\n* `modified`: Timestamp when this organization was last modified. (datetime)\n* `name`: Name of this organization. (string)\n* `description`: Optional description of this organization. 
(string)\n* `max_hosts`: Maximum number of hosts allowed to be managed by this organization. (integer)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run by this organization. (id)\n\n\n\n\n\n# Update an Organization:\n\nMake a PUT or PATCH request to this resource to update this\norganization. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this organization. (string, required)\n* `description`: Optional description of this organization. (string, default=`\"\"`)\n* `max_hosts`: Maximum number of hosts allowed to be managed by this organization. (integer, default=`0`)\n\n* `default_environment`: The default execution environment for jobs run by this organization. (id, default=``)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Organization:\n\nMake a DELETE request to this resource to delete this organization.", + "operationId": "api_organizations_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Organization" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Organization" + } + } + }, + "summary": "Make PATCH request to this resource to update this organization", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this organization. (integer)\n* `type`: Data type for this organization. (choice)\n* `url`: URL for this organization. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this organization was created. (datetime)\n* `modified`: Timestamp when this organization was last modified. (datetime)\n* `name`: Name of this organization. (string)\n* `description`: Optional description of this organization. (string)\n* `max_hosts`: Maximum number of hosts allowed to be managed by this organization. (integer)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run by this organization. (id)\n\n\n\n\n\n# Update an Organization:\n\nMake a PUT or PATCH request to this resource to update this\norganization. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this organization. (string, required)\n* `description`: Optional description of this organization. (string, default=`\"\"`)\n* `max_hosts`: Maximum number of hosts allowed to be managed by this organization. (integer, default=`0`)\n\n* `default_environment`: The default execution environment for jobs run by this organization. 
(id, default=``)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Organization:\n\nMake a DELETE request to this resource to delete this organization.", + "operationId": "api_organizations_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Organization" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Organization" + } + } + }, + "summary": "Make PUT request to this resource to update this organization", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/access_list/": { + "get": { + "description": "users.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_access_list_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ResourceAccessListElement" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/organizations/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. 
For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/organizations/{id}/admins/": { + "get": { + "description": "admin users associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of admin users\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more admin user records. \n\n## Results\n\nEach admin user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. 
The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that admin users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_admins_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/User" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "admin users associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of admin users\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more admin user records. \n\n## Results\n\nEach admin user data structure includes the following fields:\n\n* `id`: Database ID for this user. 
(integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that admin users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_admins_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/User" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/User" + } + } + }, + "summary": "Make a POST request to this resource to add an admin user to the selected organization", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/applications/": { + "get": { + "description": "applications associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of applications\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more application records. \n\n## Results\n\nEach application data structure includes the following fields:\n\n* `id`: Database ID for this application. (integer)\n* `type`: Data type for this application. (choice)\n* `url`: URL for this application. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this application was created. (datetime)\n* `modified`: Timestamp when this application was last modified. (datetime)\n* `name`: Name of this application. 
(string)\n* `description`: Optional description of this application. (string)\n* `client_id`: (string)\n* `client_secret`: Used for more stringent verification of access to an application when creating a token. (string)\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string)\n* `authorization_grant_type`: The Grant type the user must use to acquire tokens for this application. (choice)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean)\n* `organization`: Organization containing this application. (id)\n\n\n\n## Sorting\n\nTo specify that applications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_applications_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/OAuth2Application" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "applications associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of applications\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more application records. 
\n\n## Results\n\nEach application data structure includes the following fields:\n\n* `id`: Database ID for this application. (integer)\n* `type`: Data type for this application. (choice)\n* `url`: URL for this application. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this application was created. (datetime)\n* `modified`: Timestamp when this application was last modified. (datetime)\n* `name`: Name of this application. (string)\n* `description`: Optional description of this application. (string)\n* `client_id`: (string)\n* `client_secret`: Used for more stringent verification of access to an application when creating a token. (string)\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string)\n* `authorization_grant_type`: The Grant type the user must use to acquire tokens for this application. (choice)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean)\n* `organization`: Organization containing this application. (id)\n\n\n\n## Sorting\n\nTo specify that applications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_applications_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/OAuth2Application" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/OAuth2Application" + } + } + }, + "summary": "Make a POST request to this resource to create a new application", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/credentials/": { + "get": { + "description": "credentials associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. 
(integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: Inherit permissions from organization roles. If provided on creation, do not give either user or team. (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 0, + "next": null, + "previous": null, + "results": [] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/OrganizationCredentialSerializerCreate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credentials associated with the selected\norganization.\n\nThe 
resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: Inherit permissions from organization roles. If provided on creation, do not give either user or team. (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_credentials_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/OrganizationCredentialSerializerCreate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/OrganizationCredentialSerializerCreate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/execution_environments/": { + "get": { + "description": "execution environments associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of execution environments\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. 
The\n`results` list contains zero or more execution environment records. \n\n## Results\n\nEach execution environment data structure includes the following fields:\n\n* `id`: Database ID for this execution environment. (integer)\n* `type`: Data type for this execution environment. (choice)\n* `url`: URL for this execution environment. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this execution environment was created. (datetime)\n* `modified`: Timestamp when this execution environment was last modified. (datetime)\n* `name`: Name of this execution environment. (string)\n* `description`: Optional description of this execution environment. (string)\n* `organization`: The organization used to determine access to this execution environment. (id)\n* `image`: The full image location, including the container registry, image name, and version tag. (string)\n* `managed`: (boolean)\n* `credential`: (id)\n* `pull`: Pull image before running? (choice)\n - `\"\"`: ---------\n - `always`: Always pull container before running.\n - `missing`: Only pull the image if not present before running.\n - `never`: Never pull container before running.\n\n\n\n## Sorting\n\nTo specify that execution environments are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
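For example, a larger page can be requested directly with curl; the host, organization ID, and bearer token shown here are placeholders rather than values from this spec:\n\n curl -s -H \"Authorization: Bearer $TOKEN\" \"https://controller.example.com/api/v2/organizations/1/execution_environments/?page_size=100\"\n\n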
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_execution_environments_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ExecutionEnvironment" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "execution environments associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of execution environments\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more execution environment records. \n\n## Results\n\nEach execution environment data structure includes the following fields:\n\n* `id`: Database ID for this execution environment. (integer)\n* `type`: Data type for this execution environment. (choice)\n* `url`: URL for this execution environment. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this execution environment was created. (datetime)\n* `modified`: Timestamp when this execution environment was last modified. (datetime)\n* `name`: Name of this execution environment. (string)\n* `description`: Optional description of this execution environment. (string)\n* `organization`: The organization used to determine access to this execution environment. (id)\n* `image`: The full image location, including the container registry, image name, and version tag. (string)\n* `managed`: (boolean)\n* `credential`: (id)\n* `pull`: Pull image before running? 
(choice)\n - `\"\"`: ---------\n - `always`: Always pull container before running.\n - `missing`: Only pull the image if not present before running.\n - `never`: Never pull container before running.\n\n\n\n## Sorting\n\nTo specify that execution environments are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_execution_environments_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/ExecutionEnvironment" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/ExecutionEnvironment" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/galaxy_credentials/": { + "get": { + "description": "credentials associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. 
(json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_galaxy_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Credential" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credentials associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. 
(string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_galaxy_credentials_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Credential" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Credential" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/instance_groups/": { + "get": { + "description": "instance groups associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster. 
(boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assign to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_instance_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 5, + "next": null, + "previous": null, + "results": [ + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 3, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-4", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/3/access_list/", + "instances": "/api/v2/instance_groups/3/instances/", + "jobs": "/api/v2/instance_groups/3/jobs/", + "object_roles": "/api/v2/instance_groups/3/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 22, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 24, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 23, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": 
"/api/v2/instance_groups/3/" + }, + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 4, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-0", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/4/access_list/", + "instances": "/api/v2/instance_groups/4/instances/", + "jobs": "/api/v2/instance_groups/4/jobs/", + "object_roles": "/api/v2/instance_groups/4/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 25, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 27, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 26, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/4/" + }, + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 1, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-2", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/1/access_list/", + "instances": "/api/v2/instance_groups/1/instances/", + "jobs": "/api/v2/instance_groups/1/jobs/", + "object_roles": "/api/v2/instance_groups/1/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 16, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 18, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 17, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/1/" + }, + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 2, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-1", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/2/access_list/", + "instances": "/api/v2/instance_groups/2/instances/", + "jobs": "/api/v2/instance_groups/2/jobs/", + "object_roles": "/api/v2/instance_groups/2/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 19, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 21, + "name": "Read" + }, + "use_role": { + 
"description": "Can use the instance group in a job template", + "id": 20, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/2/" + }, + { + "capacity": 0, + "consumed_capacity": 0, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "id": 5, + "instances": 0, + "is_container_group": false, + "jobs_running": 0, + "jobs_total": 0, + "max_concurrent_jobs": 0, + "max_forks": 0, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "group-3", + "percent_capacity_remaining": 0.0, + "pod_spec_override": "", + "policy_instance_list": [], + "policy_instance_minimum": 0, + "policy_instance_percentage": 0, + "related": { + "access_list": "/api/v2/instance_groups/5/access_list/", + "instances": "/api/v2/instance_groups/5/instances/", + "jobs": "/api/v2/instance_groups/5/jobs/", + "object_roles": "/api/v2/instance_groups/5/object_roles/" + }, + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the instance group", + "id": 28, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the instance group", + "id": 30, + "name": "Read" + }, + "use_role": { + "description": "Can use the instance group in a job template", + "id": 29, + "name": "Use" + } + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "instance_group", + "url": "/api/v2/instance_groups/5/" + } + ] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InstanceGroup" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "instance groups associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. 
When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group. (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match instances that will be assigned to this group. (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_instance_groups_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "associate": true, + "id": 5 + } + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/inventories/": { + "get": { + "description": "inventories associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventories\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory records. \n\n## Results\n\nEach inventory data structure includes the following fields:\n\n* `id`: Database ID for this inventory. (integer)\n* `type`: Data type for this inventory. (choice)\n* `url`: URL for this inventory. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory was created. (datetime)\n* `modified`: Timestamp when this inventory was last modified. (datetime)\n* `name`: Name of this inventory. 
(string)\n* `description`: Optional description of this inventory. (string)\n* `organization`: Organization containing this inventory. (id)\n* `kind`: Kind of inventory being represented. (choice)\n - `\"\"`: Hosts have a direct link to this inventory.\n - `smart`: Hosts for inventory generated using the host_filter property.\n - `constructed`: Parse list of source inventories with the constructed inventory plugin.\n* `host_filter`: Filter that will be applied to the hosts of this inventory. (string)\n* `variables`: Inventory variables in JSON or YAML format. (json)\n* `has_active_failures`: This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed. (boolean)\n* `total_hosts`: This field is deprecated and will be removed in a future release. Total number of hosts in this inventory. (integer)\n* `hosts_with_active_failures`: This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures. (integer)\n* `total_groups`: This field is deprecated and will be removed in a future release. Total number of groups in this inventory. (integer)\n* `has_inventory_sources`: This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources. (boolean)\n* `total_inventory_sources`: Total number of external inventory sources configured within this inventory. (integer)\n* `inventory_sources_with_failures`: Number of external inventory sources in this inventory with failures. (integer)\n* `pending_deletion`: Flag indicating the inventory is being deleted. (boolean)\n* `prevent_instance_group_fallback`: If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instance groups to run associated job templates on. If this setting is enabled and you provide an empty list, the global instance groups will be applied. (boolean)\n\n\n\n## Sorting\n\nTo specify that inventories are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
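For instance, sorting and paging options combine freely; with a placeholder host, organization ID, and bearer token, the inventories could be fetched in reverse name order, fifty at a time:\n\n curl -s -H \"Authorization: Bearer $TOKEN\" \"https://controller.example.com/api/v2/organizations/1/inventories/?order_by=-name&page_size=50\"\n\n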
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_inventories_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Inventory" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/organizations/{id}/job_templates/": { + "get": { + "description": "job templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of job templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job template records. \n\n## Results\n\nEach job template data structure includes the following fields:\n\n* `id`: Database ID for this job template. (integer)\n* `type`: Data type for this job template. (choice)\n* `url`: URL for this job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job template was created. (datetime)\n* `modified`: Timestamp when this job template was last modified. (datetime)\n* `name`: Name of this job template. (string)\n* `description`: Optional description of this job template. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. 
(string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this template. (id)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n* `execution_environment`: The container image to be used for execution. (id)\n* `host_config_key`: (string)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_diff_mode_on_launch`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_job_type_on_launch`: (boolean)\n* `ask_verbosity_on_launch`: (boolean)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_credential_on_launch`: (boolean)\n* `ask_execution_environment_on_launch`: (boolean)\n* `ask_labels_on_launch`: (boolean)\n* `ask_forks_on_launch`: (boolean)\n* `ask_job_slice_count_on_launch`: (boolean)\n* `ask_timeout_on_launch`: (boolean)\n* `ask_instance_groups_on_launch`: (boolean)\n* `survey_enabled`: (boolean)\n* `become_enabled`: (boolean)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean)\n* `allow_simultaneous`: (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instances groups to run on.If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n## Sorting\n\nTo specify that job templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
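To estimate how many pages a query will produce, divide the `count` field described above by the page size; as a sketch with a placeholder host, organization ID, and bearer token (and jq to extract the field):\n\n curl -s -H \"Authorization: Bearer $TOKEN\" \"https://controller.example.com/api/v2/organizations/1/job_templates/?page_size=50\" | jq '.count'\n\n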
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_job_templates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/JobTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "job templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of job templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more job template records. \n\n## Results\n\nEach job template data structure includes the following fields:\n\n* `id`: Database ID for this job template. (integer)\n* `type`: Data type for this job template. (choice)\n* `url`: URL for this job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this job template was created. (datetime)\n* `modified`: Timestamp when this job template was last modified. (datetime)\n* `name`: Name of this job template. (string)\n* `description`: Optional description of this job template. (string)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `inventory`: (id)\n* `project`: (id)\n* `playbook`: (string)\n* `scm_branch`: Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. 
(string)\n* `forks`: (integer)\n* `limit`: (string)\n* `verbosity`: (choice)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `extra_vars`: (json)\n* `job_tags`: (string)\n* `force_handlers`: (boolean)\n* `skip_tags`: (string)\n* `start_at_task`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `use_fact_cache`: If enabled, the service will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible. (boolean)\n* `organization`: The organization used to determine access to this template. (id)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n* `execution_environment`: The container image to be used for execution. (id)\n* `host_config_key`: (string)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_diff_mode_on_launch`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_job_type_on_launch`: (boolean)\n* `ask_verbosity_on_launch`: (boolean)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_credential_on_launch`: (boolean)\n* `ask_execution_environment_on_launch`: (boolean)\n* `ask_labels_on_launch`: (boolean)\n* `ask_forks_on_launch`: (boolean)\n* `ask_job_slice_count_on_launch`: (boolean)\n* `ask_timeout_on_launch`: (boolean)\n* `ask_instance_groups_on_launch`: (boolean)\n* `survey_enabled`: (boolean)\n* `become_enabled`: (boolean)\n* `diff_mode`: If enabled, textual changes made to any templated files on the host are shown in the standard output (boolean)\n* `allow_simultaneous`: (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `job_slice_count`: The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1. (integer)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `prevent_instance_group_fallback`: If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instances groups to run on.If this setting is enabled and you provided an empty list, the global instance groups will be applied. (boolean)\n\n\n\n## Sorting\n\nTo specify that job templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_job_templates_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/JobTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/JobTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/notification_templates/": { + "get": { + "description": "notification templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
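Rather than constructing page URLs by hand, a client can also follow the `next` link embedded in each response until it is null; as a sketch with a placeholder host, organization ID, and bearer token (and jq to extract the link):\n\n curl -s -H \"Authorization: Bearer $TOKEN\" \"https://controller.example.com/api/v2/organizations/1/notification_templates/\" | jq -r '.next'\n\n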
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_notification_templates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. 
(json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_notification_templates_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/notification_templates_approvals/": { + "get": { + "description": "notification templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. 
(json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_notification_templates_approvals_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. 
(string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_notification_templates_approvals_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/notification_templates_error/": { + "get": { + "description": "notification templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. 
(string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_notification_templates_error_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_notification_templates_error_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/notification_templates_started/": { + "get": { + "description": "notification templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. 
(datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_notification_templates_started_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. 
(choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_notification_templates_started_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/notification_templates_success/": { + "get": { + "description": "notification templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_notification_templates_success_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. 
\n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_notification_templates_success_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/object_roles/": { + "get": { + "description": "roles associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records. \n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. 
The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_object_roles_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Role" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/organizations/{id}/projects/": { + "get": { + "description": "projects associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of projects\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more project records. \n\n## Results\n\nEach project data structure includes the following fields:\n\n* `id`: Database ID for this project. (integer)\n* `type`: Data type for this project. (choice)\n* `url`: URL for this project. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project was created. (datetime)\n* `modified`: Timestamp when this project was last modified. (datetime)\n* `name`: Name of this project. 
(string)\n* `description`: Optional description of this project. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `scm_revision`: The last revision fetched by a project update (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n* `organization`: The organization used to determine access to this template. (id)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run using this project. (id)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that projects are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_projects_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Project" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "projects associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of projects\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more project records. \n\n## Results\n\nEach project data structure includes the following fields:\n\n* `id`: Database ID for this project. (integer)\n* `type`: Data type for this project. (choice)\n* `url`: URL for this project. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project was created. (datetime)\n* `modified`: Timestamp when this project was last modified. (datetime)\n* `name`: Name of this project. (string)\n* `description`: Optional description of this project. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. 
(boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `scm_revision`: The last revision fetched by a project update (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n* `organization`: The organization used to determine access to this template. (id)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run using this project. (id)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that projects are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_projects_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Project" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Project" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/teams/": { + "get": { + "description": "teams associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of teams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more team records. 
\n\n## Results\n\nEach team data structure includes the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. (choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. (string)\n* `description`: Optional description of this team. (string)\n* `organization`: (id)\n\n\n\n## Sorting\n\nTo specify that teams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_teams_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Team" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "teams associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of teams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more team records. \n\n## Results\n\nEach team data structure includes the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. 
(choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. (string)\n* `description`: Optional description of this team. (string)\n* `organization`: (id)\n\n\n\n## Sorting\n\nTo specify that teams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_teams_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Team" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Team" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/users/": { + "get": { + "description": "users associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. 
(string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_users_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/User" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "users associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. 
(string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_users_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "id": 1, + "is_system_auditor": true + } + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/User" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/organizations/{id}/workflow_job_templates/": { + "get": { + "description": "workflow job templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template records. \n\n## Results\n\nEach workflow job template data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template. (integer)\n* `type`: Data type for this workflow job template. (choice)\n* `url`: URL for this workflow job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template was created. (datetime)\n* `modified`: Timestamp when this workflow job template was last modified. (datetime)\n* `name`: Name of this workflow job template. (string)\n* `description`: Optional description of this workflow job template. 
(string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `extra_vars`: (json)\n* `organization`: The organization used to determine access to this template. (id)\n* `survey_enabled`: (boolean)\n* `allow_simultaneous`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `ask_labels_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n## Sorting\n\nTo specify that workflow job templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_workflow_job_templates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "workflow job templates associated with the selected\norganization.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template records. \n\n## Results\n\nEach workflow job template data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template. (integer)\n* `type`: Data type for this workflow job template. (choice)\n* `url`: URL for this workflow job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template was created. (datetime)\n* `modified`: Timestamp when this workflow job template was last modified. (datetime)\n* `name`: Name of this workflow job template. (string)\n* `description`: Optional description of this workflow job template. 
(string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `extra_vars`: (json)\n* `organization`: The organization used to determine access to this template. (id)\n* `survey_enabled`: (boolean)\n* `allow_simultaneous`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `ask_labels_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n## Sorting\n\nTo specify that workflow job templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_organizations_workflow_job_templates_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowJobTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/ping/": { + "get": { + "description": "this requires no auth and is intended for use by the installer process.", + "operationId": "api_ping_list", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "active_node": "awx_1", + "ha": false, + "install_uuid": "00000000-0000-0000-0000-000000000000", + "instance_groups": [], + "instances": [], + "version": "4.5.1.dev5+g0b88711771" + } + } + } + }, + "summary": "Everything returned here should be considered public / insecure, as", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/project_updates/": { + "get": { + "description": "project updates.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of project updates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more project update records. \n\n## Results\n\nEach project update data structure includes the following fields:\n\n* `id`: Database ID for this project update. (integer)\n* `type`: Data type for this project update. (choice)\n* `url`: URL for this project update. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project update was created. (datetime)\n* `modified`: Timestamp when this project update was last modified. (datetime)\n* `name`: Name of this project update. (string)\n* `description`: Optional description of this project update. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. 
(datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `scm_revision`: The SCM Revision discovered by this update for the given project and branch. (string)\n* `project`: (id)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `job_tags`: Parts of the project update playbook that will be run. (string)\n\n\n\n## Sorting\n\nTo specify that project updates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
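For example, the most recently finished project updates can be listed first by combining sorting with a page size (the hostname and bearer token are illustrative placeholders, not values defined by this API):\n\n curl -H \"Authorization: Bearer $TOKEN\" \"https://controller.example.com/api/v2/project_updates/?order_by=-finished&page_size=25\"\n\n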
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_project_updates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ProjectUpdateList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/project_updates/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this project update. (integer)\n* `type`: Data type for this project update. (choice)\n* `url`: URL for this project update. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project update was created. (datetime)\n* `modified`: Timestamp when this project update was last modified. (datetime)\n* `name`: Name of this project update. (string)\n* `description`: Optional description of this project update. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `scm_revision`: The SCM Revision discovered by this update for the given project and branch. 
(string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `result_traceback`: (string)\n* `event_processing_finished`: Indicates whether all of the events generated by this unified job have been saved to the database. (boolean)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `project`: (id)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `job_tags`: Parts of the project update playbook that will be run. (string)\n* `host_status_counts`: Playbook stats from the Ansible playbook_on_stats event. (json)\n* `playbook_counts`: A count of all plays and tasks for the job run. (field)\n\n\n\n\n\n# Delete a Project Update:\n\nMake a DELETE request to this resource to delete this project update.", + "operationId": "api_project_updates_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "403": { + "examples": { + "application/json": { + "detail": "Cannot delete running job resource." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single project update", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this project update. (integer)\n* `type`: Data type for this project update. (choice)\n* `url`: URL for this project update. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project update was created. (datetime)\n* `modified`: Timestamp when this project update was last modified. (datetime)\n* `name`: Name of this project update. (string)\n* `description`: Optional description of this project update. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. 
(string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `scm_revision`: The SCM Revision discovered by this update for the given project and branch. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `result_traceback`: (string)\n* `event_processing_finished`: Indicates whether all of the events generated by this unified job have been saved to the database. (boolean)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `project`: (id)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `job_tags`: Parts of the project update playbook that will be run. (string)\n* `host_status_counts`: Playbook stats from the Ansible playbook_on_stats event. (json)\n* `playbook_counts`: A count of all plays and tasks for the job run. (field)\n\n\n\n\n\n# Delete a Project Update:\n\nMake a DELETE request to this resource to delete this project update.", + "operationId": "api_project_updates_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/ProjectUpdateDetail" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single project update", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/project_updates/{id}/cancel/": { + "get": { + "description": "canceled. The response will include the following field:\n\n* `can_cancel`: Indicates whether this update can be canceled (boolean,\n read-only)\n\nMake a POST request to this resource to cancel a pending or running project\nupdate. 
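For example (the hostname, update ID, and bearer token are illustrative placeholders, not values defined by this API):\n\n curl -X POST -H \"Authorization: Bearer $TOKEN\" \"https://controller.example.com/api/v2/project_updates/42/cancel/\"\n\n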
The response status code will be 202 if successful, or 405 if the\nupdate cannot be canceled.", + "operationId": "api_project_updates_cancel_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/ProjectUpdateCancel" + } + } + }, + "summary": "Make a GET request to this resource to determine if the project update can be", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "canceled. The response will include the following field:\n\n* `can_cancel`: Indicates whether this update can be canceled (boolean,\n read-only)\n\nMake a POST request to this resource to cancel a pending or running project\nupdate. The response status code will be 202 if successful, or 405 if the\nupdate cannot be canceled.", + "operationId": "api_project_updates_cancel_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/ProjectUpdateCancel" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/ProjectUpdateCancel" + } + } + }, + "summary": "Make a GET request to this resource to determine if the project update can be", + "tags": [ + "api" + ] + } + }, + "/api/v2/project_updates/{id}/events/": { + "get": { + "description": "project update events associated with the selected\nproject update.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of project update events\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more project update event records. \n\n## Results\n\nEach project update event data structure includes the following fields:\n\n* `id`: Database ID for this project update event. (integer)\n* `type`: Data type for this project update event. (choice)\n* `url`: URL for this project update event. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project update event was created. (datetime)\n* `modified`: Timestamp when this project update event was last modified. 
(datetime)\n* `event`: (choice)\n - `runner_on_failed`: Host Failed\n - `runner_on_start`: Host Started\n - `runner_on_ok`: Host OK\n - `runner_on_error`: Host Failure\n - `runner_on_skipped`: Host Skipped\n - `runner_on_unreachable`: Host Unreachable\n - `runner_on_no_hosts`: No Hosts Remaining\n - `runner_on_async_poll`: Host Polling\n - `runner_on_async_ok`: Host Async OK\n - `runner_on_async_failed`: Host Async Failure\n - `runner_item_on_ok`: Item OK\n - `runner_item_on_failed`: Item Failed\n - `runner_item_on_skipped`: Item Skipped\n - `runner_retry`: Host Retry\n - `runner_on_file_diff`: File Difference\n - `playbook_on_start`: Playbook Started\n - `playbook_on_notify`: Running Handlers\n - `playbook_on_include`: Including File\n - `playbook_on_no_hosts_matched`: No Hosts Matched\n - `playbook_on_no_hosts_remaining`: No Hosts Remaining\n - `playbook_on_task_start`: Task Started\n - `playbook_on_vars_prompt`: Variables Prompted\n - `playbook_on_setup`: Gathering Facts\n - `playbook_on_import_for_host`: internal: on Import for Host\n - `playbook_on_not_import_for_host`: internal: on Not Import for Host\n - `playbook_on_play_start`: Play Started\n - `playbook_on_stats`: Playbook Complete\n - `debug`: Debug\n - `verbose`: Verbose\n - `deprecated`: Deprecated\n - `warning`: Warning\n - `system_warning`: System Warning\n - `error`: Error\n* `counter`: (integer)\n* `event_display`: (string)\n* `event_data`: (json)\n* `event_level`: (integer)\n* `failed`: (boolean)\n* `changed`: (boolean)\n* `uuid`: (string)\n* `host_name`: (field)\n* `playbook`: (string)\n* `play`: (string)\n* `task`: (string)\n* `role`: (string)\n* `stdout`: (field)\n* `start_line`: (integer)\n* `end_line`: (integer)\n* `verbosity`: (integer)\n* `project_update`: (id)\n\n\n\n## Sorting\n\nTo specify that project update events are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
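For example, events can be paged through in playbook order using the `counter` field documented above (the hostname, update ID, and bearer token are illustrative placeholders, not values defined by this API):\n\n curl -H \"Authorization: Bearer $TOKEN\" \"https://controller.example.com/api/v2/project_updates/42/events/?order_by=counter&page_size=100\"\n\n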
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_project_updates_events_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ProjectUpdateEvent" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/project_updates/{id}/notifications/": { + "get": { + "description": "notifications associated with the selected\nproject update.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notifications\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification records. \n\n## Results\n\nEach notification data structure includes the following fields:\n\n* `id`: Database ID for this notification. (integer)\n* `type`: Data type for this notification. (choice)\n* `url`: URL for this notification. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification was created. (datetime)\n* `modified`: Timestamp when this notification was last modified. 
(datetime)\n* `notification_template`: (id)\n* `error`: (string)\n* `status`: (choice)\n - `pending`: Pending\n - `successful`: Successful\n - `failed`: Failed\n* `notifications_sent`: (integer)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `recipients`: (string)\n* `subject`: (string)\n* `body`: Notification body (json)\n\n\n\n## Sorting\n\nTo specify that notifications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_project_updates_notifications_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Notification" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/project_updates/{id}/scm_inventory_updates/": { + "get": { + "description": "inventory updates associated with the selected\nproject update.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventory updates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory update records. \n\n## Results\n\nEach inventory update data structure includes the following fields:\n\n* `id`: Database ID for this inventory update. (integer)\n* `type`: Data type for this inventory update. 
(choice)\n* `url`: URL for this inventory update. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory update was created. (datetime)\n* `modified`: Timestamp when this inventory update was last modified. (datetime)\n* `name`: Name of this inventory update. (string)\n* `description`: Optional description of this inventory update. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as \"foo.bar\", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get(\"foo\", {}).get(\"bar\", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example if enabled_var=\"status.power_state\" and enabled_value=\"powered_on\" with host variables:{ \"status\": { \"power_state\": \"powered_on\", \"created\": \"2018-02-01T08:00:00.000000Z:00\", \"healthy\": true }, \"name\": \"foobar\", \"ip_address\": \"192.168.2.1\"} The host would be marked enabled. If power_state were any value other than powered_on then the host would be disabled when imported. If the key is not found then the host will be enabled (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. 
Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match (string)\n* `inventory`: (id)\n* `inventory_source`: (id)\n* `license_error`: (boolean)\n* `org_host_limit_error`: (boolean)\n* `source_project_update`: Inventory files from this Project Update were used for the inventory update. (id)\n* `instance_group`: The Instance group the job was run under (id)\n* `scm_revision`: The SCM Revision from the Project used for this inventory update. Only applicable to inventories sourced from scm (string)\n\n\n\n## Sorting\n\nTo specify that inventory updates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_project_updates_scm_inventory_updates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InventoryUpdateList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/project_updates/{id}/stdout/": { + "get": { + "description": "project update.\n\n## Format\n\nUse the `format` query string parameter to specify the output format.\n\n* Browsable API: `?format=api`\n* HTML: `?format=html`\n* Plain Text: `?format=txt`\n* Plain Text with ANSI color codes: `?format=ansi`\n* JSON structure: `?format=json`\n* Downloaded Plain 
Text: `?format=txt_download`\n* Downloaded Plain Text with ANSI color codes: `?format=ansi_download`\n\n(_New in Ansible Tower 2.0.0_) When using the Browsable API, HTML and JSON\nformats, the `start_line` and `end_line` query string parameters can be used\nto specify a range of line numbers to retrieve.\n\nUse `dark=1` or `dark=0` as a query string parameter to force or disable a\ndark background.\n\nFiles over 1.0\u00a0MB (configurable)\nwill not display in the browser. Use the `txt_download` or `ansi_download`\nformats to download the file directly to view it.", + "operationId": "api_project_updates_stdout_read", + "parameters": [], + "produces": [ + "text/plain", + "application/json", + "text/html; charset=utf-8" + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/UnifiedJobStdout" + } + } + }, + "summary": "Make GET request to this resource to retrieve the stdout from running this", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/projects/": { + "get": { + "description": "projects.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of projects\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more project records. \n\n## Results\n\nEach project data structure includes the following fields:\n\n* `id`: Database ID for this project. (integer)\n* `type`: Data type for this project. (choice)\n* `url`: URL for this project. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project was created. (datetime)\n* `modified`: Timestamp when this project was last modified. (datetime)\n* `name`: Name of this project. (string)\n* `description`: Optional description of this project. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. 
(integer)\n* `scm_revision`: The last revision fetched by a project update (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n* `organization`: The organization used to determine access to this template. (id)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run using this project. (id)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that projects are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
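For example, projects can be listed alphabetically one page at a time (the hostname and bearer token are illustrative placeholders, not values defined by this API):\n\n curl -H \"Authorization: Bearer $TOKEN\" \"https://controller.example.com/api/v2/projects/?order_by=name&page_size=50\"\n\n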
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Project" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "projects.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of projects\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more project records. \n\n## Results\n\nEach project data structure includes the following fields:\n\n* `id`: Database ID for this project. (integer)\n* `type`: Data type for this project. (choice)\n* `url`: URL for this project. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project was created. (datetime)\n* `modified`: Timestamp when this project was last modified. (datetime)\n* `name`: Name of this project. (string)\n* `description`: Optional description of this project. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. 
(boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `scm_revision`: The last revision fetched by a project update (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n* `organization`: The organization used to determine access to this template. (id)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run using this project. (id)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that projects are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Project" + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Project" + } + }, + "415": { + "examples": { + "application/json": { + "detail": "Unsupported media type \"text/html\" in request." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/projects/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this project. (integer)\n* `type`: Data type for this project. (choice)\n* `url`: URL for this project. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project was created. 
(datetime)\n* `modified`: Timestamp when this project was last modified. (datetime)\n* `name`: Name of this project. (string)\n* `description`: Optional description of this project. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `scm_revision`: The last revision fetched by a project update (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n* `organization`: The organization used to determine access to this template. (id)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run using this project. (id)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n\n\n# Update a Project:\n\nMake a PUT or PATCH request to this resource to update this\nproject. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this project. (string, required)\n* `description`: Optional description of this project. (string, default=`\"\"`)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string, default=`\"\"`)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual (default)\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string, default=`\"\"`)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string, default=`\"\"`)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string, default=`\"\"`)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean, default=`False`)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. 
(boolean, default=`False`)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean, default=`False`)\n* `credential`: (id, default=``)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer, default=`0`)\n\n\n\n\n\n* `organization`: The organization used to determine access to this template. (id, default=``)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean, default=`False`)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer, default=`0`)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean, default=`False`)\n\n* `default_environment`: The default execution environment for jobs run using this project. (id, default=``)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. (id, default=``)\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Project:\n\nMake a DELETE request to this resource to delete this project.", + "operationId": "api_projects_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single project", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this project. (integer)\n* `type`: Data type for this project. (choice)\n* `url`: URL for this project. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project was created. (datetime)\n* `modified`: Timestamp when this project was last modified. (datetime)\n* `name`: Name of this project. (string)\n* `description`: Optional description of this project. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. 
(integer)\n* `scm_revision`: The last revision fetched by a project update (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n* `organization`: The organization used to determine access to this template. (id)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run using this project. (id)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n\n\n# Update a Project:\n\nMake a PUT or PATCH request to this resource to update this\nproject. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this project. (string, required)\n* `description`: Optional description of this project. (string, default=`\"\"`)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string, default=`\"\"`)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual (default)\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string, default=`\"\"`)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string, default=`\"\"`)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string, default=`\"\"`)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean, default=`False`)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean, default=`False`)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean, default=`False`)\n* `credential`: (id, default=``)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer, default=`0`)\n\n\n\n\n\n* `organization`: The organization used to determine access to this template. (id, default=``)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean, default=`False`)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer, default=`0`)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean, default=`False`)\n\n* `default_environment`: The default execution environment for jobs run using this project. (id, default=``)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. 
(id, default=``)\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Project:\n\nMake a DELETE request to this resource to delete this project.", + "operationId": "api_projects_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Project" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single project", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this project. (integer)\n* `type`: Data type for this project. (choice)\n* `url`: URL for this project. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project was created. (datetime)\n* `modified`: Timestamp when this project was last modified. (datetime)\n* `name`: Name of this project. (string)\n* `description`: Optional description of this project. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `scm_revision`: The last revision fetched by a project update (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n* `organization`: The organization used to determine access to this template. (id)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run using this project. (id)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. 
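A minimal sketch of the single-project read described above, with the same hypothetical placeholders (`jq` is optional here and used only to pick out a few of the documented fields):

    # Fetch one project record and show a handful of its documented fields.
    curl -s -H "Authorization: Bearer $TOKEN" \
        "https://$CONTROLLER_HOST/api/v2/projects/1/" \
        | jq '{name, scm_type, scm_url, status}'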
(id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n\n\n# Update a Project:\n\nMake a PUT or PATCH request to this resource to update this\nproject. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this project. (string, required)\n* `description`: Optional description of this project. (string, default=`\"\"`)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string, default=`\"\"`)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual (default)\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string, default=`\"\"`)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string, default=`\"\"`)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string, default=`\"\"`)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean, default=`False`)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean, default=`False`)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean, default=`False`)\n* `credential`: (id, default=``)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer, default=`0`)\n\n\n\n\n\n* `organization`: The organization used to determine access to this template. (id, default=``)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean, default=`False`)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer, default=`0`)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean, default=`False`)\n\n* `default_environment`: The default execution environment for jobs run using this project. (id, default=``)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. 
(id, default=``)\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Project:\n\nMake a DELETE request to this resource to delete this project.", + "operationId": "api_projects_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "name": "foooooo" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "allow_override": false, + "created": "2018-02-01T08:00:00.000000Z", + "credential": null, + "custom_virtualenv": null, + "default_environment": null, + "description": "", + "id": 1, + "last_job_failed": false, + "last_job_run": null, + "last_update_failed": false, + "last_updated": null, + "local_path": "", + "modified": "2018-02-01T08:00:00.000000Z", + "name": "foooooo", + "next_job_run": null, + "organization": 1, + "related": { + "access_list": "/api/v2/projects/1/access_list/", + "activity_stream": "/api/v2/projects/1/activity_stream/", + "copy": "/api/v2/projects/1/copy/", + "inventory_files": "/api/v2/projects/1/inventories/", + "notification_templates_error": "/api/v2/projects/1/notification_templates_error/", + "notification_templates_started": "/api/v2/projects/1/notification_templates_started/", + "notification_templates_success": "/api/v2/projects/1/notification_templates_success/", + "object_roles": "/api/v2/projects/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "playbooks": "/api/v2/projects/1/playbooks/", + "project_updates": "/api/v2/projects/1/project_updates/", + "schedules": "/api/v2/projects/1/schedules/", + "scm_inventory_sources": "/api/v2/projects/1/scm_inventory_sources/", + "teams": "/api/v2/projects/1/teams/", + "update": "/api/v2/projects/1/update/" + }, + "scm_branch": "", + "scm_clean": false, + "scm_delete_on_update": false, + "scm_refspec": "", + "scm_revision": "", + "scm_track_submodules": false, + "scm_type": "", + "scm_update_cache_timeout": 0, + "scm_update_on_launch": false, + "scm_url": "", + "signature_validation_credential": null, + "status": "missing", + "summary_fields": { + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the project", + "id": 16, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the project", + "id": 19, + "name": "Read" + }, + "update_role": { + "description": "May update the project", + "id": 18, + "name": "Update" + }, + "use_role": { + "description": "Can use the project in a job template", + "id": 17, + "name": "Use" + } + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "user_capabilities": { + "copy": false, + "delete": true, + "edit": true, + "schedule": false, + "start": false + } + }, + "timeout": 0, + "type": "project", + "url": "/api/v2/projects/1/" + } + }, + "schema": { + "$ref": "#/definitions/Project" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single project", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this project. (integer)\n* `type`: Data type for this project. (choice)\n* `url`: URL for this project. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. 
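The PATCH example above sends only the field being changed; as a curl sketch of that same request body:

    # Partial update: rename project 1 without touching any other field.
    curl -s -X PATCH \
        -H "Authorization: Bearer $TOKEN" \
        -H "Content-Type: application/json" \
        -d '{"name": "foooooo"}' \
        "https://$CONTROLLER_HOST/api/v2/projects/1/"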
The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project was created. (datetime)\n* `modified`: Timestamp when this project was last modified. (datetime)\n* `name`: Name of this project. (string)\n* `description`: Optional description of this project. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `scm_revision`: The last revision fetched by a project update (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n* `organization`: The organization used to determine access to this template. (id)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run using this project. (id)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n\n\n# Update a Project:\n\nMake a PUT or PATCH request to this resource to update this\nproject. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this project. (string, required)\n* `description`: Optional description of this project. (string, default=`\"\"`)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string, default=`\"\"`)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual (default)\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string, default=`\"\"`)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string, default=`\"\"`)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string, default=`\"\"`)\n* `scm_clean`: Discard any local changes before syncing the project. 
(boolean, default=`False`)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean, default=`False`)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean, default=`False`)\n* `credential`: (id, default=``)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer, default=`0`)\n\n\n\n\n\n* `organization`: The organization used to determine access to this template. (id, default=``)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean, default=`False`)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer, default=`0`)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean, default=`False`)\n\n* `default_environment`: The default execution environment for jobs run using this project. (id, default=``)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. (id, default=``)\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Project:\n\nMake a DELETE request to this resource to delete this project.", + "operationId": "api_projects_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Project" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Project" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single project", + "tags": [ + "api" + ] + } + }, + "/api/v2/projects/{id}/access_list/": { + "get": { + "description": "users.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. 
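Because the PUT documented above must carry all modifiable fields, a practical sketch reads the current record, edits it, and writes the whole object back. This assumes, as is common for REST APIs of this shape, that read-only fields echoed back in the body are ignored on write:

    # Read-modify-write: PUT replaces the record, so start from the current one.
    curl -s -H "Authorization: Bearer $TOKEN" \
        "https://$CONTROLLER_HOST/api/v2/projects/1/" \
        | jq '.description = "updated via PUT"' \
        | curl -s -X PUT -H "Authorization: Bearer $TOKEN" \
              -H "Content-Type: application/json" -d @- \
              "https://$CONTROLLER_HOST/api/v2/projects/1/"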
(string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_access_list_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ResourceAccessListElement" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/projects/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\nproject.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). 
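The sorting, pagination, and search parameters documented above (here for the access list, and repeated for each list endpoint below) compose freely on the query string; for example:

    # Second page of 100 users with access to project 1, usernames descending,
    # narrowed by a case-insensitive search term.
    curl -s -H "Authorization: Bearer $TOKEN" \
        "https://$CONTROLLER_HOST/api/v2/projects/1/access_list/?order_by=-username&page_size=100&page=2&search=findme"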
(choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/projects/{id}/copy/": { + "get": { + "description": "", + "operationId": "api_projects_copy_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": 
"query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Copy" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "No Description for get on /api/{version}/projects/{id}/copy/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_projects_copy_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Copy" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Copy" + } + } + }, + "summary": "No Description for post on /api/{version}/projects/{id}/copy/", + "tags": [ + "api" + ] + } + }, + "/api/v2/projects/{id}/inventories/": { + "get": { + "description": "record containing the following fields:\n\n* `inventory_files`: Array of inventory files and directories available within this project, not comprehensive. (json)", + "operationId": "api_projects_inventories_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/ProjectInventories" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single project", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/projects/{id}/notification_templates_error/": { + "get": { + "description": "notification templates associated with the selected\nproject.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. 
(string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_notification_templates_error_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\nproject.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. 
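Stepping back to the `/copy/` endpoint documented above, its POST takes a `Copy` body; a sketch with a hypothetical name:

    # Copy project 1 under a new name; a 201 response carries the new project.
    curl -s -X POST \
        -H "Authorization: Bearer $TOKEN" \
        -H "Content-Type: application/json" \
        -d '{"name": "copy-of-project-1"}' \
        "https://$CONTROLLER_HOST/api/v2/projects/1/copy/"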
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_notification_templates_error_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/projects/{id}/notification_templates_started/": { + "get": { + "description": "notification templates associated with the selected\nproject.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. 
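The POST on this sub-list attaches an existing notification template to the project's error notifications. Posting just the template's `id` is the association convention used across this API's related lists; that convention, and the template ID, are assumptions here:

    # Attach notification template 5 to project 1's error notifications.
    curl -s -X POST \
        -H "Authorization: Bearer $TOKEN" \
        -H "Content-Type: application/json" \
        -d '{"id": 5}' \
        "https://$CONTROLLER_HOST/api/v2/projects/1/notification_templates_error/"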
(datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_notification_templates_started_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\nproject.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. 
(choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_notification_templates_started_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/projects/{id}/notification_templates_success/": { + "get": { + "description": "notification templates associated with the selected\nproject.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_notification_templates_success_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\nproject.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. 
\n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_notification_templates_success_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/projects/{id}/object_roles/": { + "get": { + "description": "roles associated with the selected\nproject.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records. \n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. 
The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_object_roles_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Role" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/projects/{id}/playbooks/": { + "get": { + "description": "for a project.", + "operationId": "api_projects_playbooks_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/ProjectPlaybooks" + } + } + }, + "summary": "Make GET request to this resource to retrieve a list of playbooks available", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/projects/{id}/project_updates/": { + "get": { + "description": "project updates associated with the selected\nproject.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of project updates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more project update records. 
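The playbooks endpoint above is a plain read; assuming it returns the JSON list of playbook paths that the last project update discovered, a sketch is just a GET:

    # List the playbooks available in project 1's checked-out repository.
    curl -s -H "Authorization: Bearer $TOKEN" \
        "https://$CONTROLLER_HOST/api/v2/projects/1/playbooks/"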
\n\n## Results\n\nEach project update data structure includes the following fields:\n\n* `id`: Database ID for this project update. (integer)\n* `type`: Data type for this project update. (choice)\n* `url`: URL for this project update. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project update was created. (datetime)\n* `modified`: Timestamp when this project update was last modified. (datetime)\n* `name`: Name of this project update. (string)\n* `description`: Optional description of this project update. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `scm_revision`: The SCM Revision discovered by this update for the given project and branch. (string)\n* `project`: (id)\n* `job_type`: (choice)\n - `run`: Run\n - `check`: Check\n* `job_tags`: Parts of the project update playbook that will be run. 
(string)\n\n\n\n## Sorting\n\nTo specify that project updates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n    ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n    ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n    ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n    ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n    ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n    ?related__search=findme", + "operationId": "api_projects_project_updates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ProjectUpdateList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/projects/{id}/schedules/": { + "get": { + "description": "schedules associated with the selected\nproject.\n\nThe resulting data structure contains:\n\n    {\n        \"count\": 99,\n        \"next\": null,\n        \"previous\": null,\n        \"results\": [\n            ...\n        ]\n    }\n\nThe `count` field indicates the total number of schedules\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more schedule records. \n\n## Results\n\nEach schedule data structure includes the following fields:\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule.
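Returning to the project-updates listing above, the documented `order_by` parameter surfaces the most recent runs first:

    # Most recent project updates for project 1; `-finished` sorts newest first.
    curl -s -H "Authorization: Bearer $TOKEN" \
        "https://$CONTROLLER_HOST/api/v2/projects/1/project_updates/?order_by=-finished"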
(string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time, afterwards the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n## Sorting\n\nTo specify that schedules are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_schedules_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Schedule" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "schedules associated with the selected\nproject.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of schedules\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more schedule records. \n\n## Results\n\nEach schedule data structure includes the following fields:\n\n* `rrule`: A value representing the schedules iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. 
(string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time, afterwards the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n## Sorting\n\nTo specify that schedules are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", +            "operationId": "api_projects_schedules_create", +            "parameters": [ +                { +                    "in": "body", +                    "name": "data", +                    "required": true, +                    "schema": { +                        "$ref": "#/definitions/Schedule" +                    } +                } +            ], +            "produces": [ +                "application/json" +            ], +            "responses": { +                "201": { +                    "description": "", +                    "schema": { +                        "$ref": "#/definitions/Schedule" +                    } +                }, +                "400": { +                    "examples": { +                        "application/json": { +                            "extra_data": [ +                                "Field is not allowed on launch." +                            ] +                        } +                    } +                } +            }, +            "summary": "Make a GET request to this resource to retrieve a list of", +            "tags": [ +                "api" +            ] +        } +    }, +    "/api/v2/projects/{id}/scm_inventory_sources/": { +        "get": { +            "description": "inventory sources associated with the selected\nproject.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of inventory sources\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more inventory source records. 
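A minimal sketch of creating a schedule through the POST operation above, reusing the iCal RRULE shape shown in the examples elsewhere in this spec; the host, token, project ID, and schedule name are placeholders.

# Create a daily sync schedule for project 1.
curl -s -X POST \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name": "Nightly project sync", "rrule": "DTSTART:20251117T050000Z RRULE:FREQ=DAILY;INTERVAL=1"}' \
  "https://controller.example.com/api/v2/projects/1/schedules/"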
\n\n## Results\n\nEach inventory source data structure includes the following fields:\n\n* `id`: Database ID for this inventory source. (integer)\n* `type`: Data type for this inventory source. (choice)\n* `url`: URL for this inventory source. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this inventory source was created. (datetime)\n* `modified`: Timestamp when this inventory source was last modified. (datetime)\n* `name`: Name of this inventory source. (string)\n* `description`: Optional description of this inventory source. (string)\n* `source`: (choice)\n - `file`: File, Directory or Script\n - `constructed`: Template additional groups and hostvars at runtime\n - `scm`: Sourced from a Project\n - `ec2`: Amazon EC2\n - `gce`: Google Compute Engine\n - `azure_rm`: Microsoft Azure Resource Manager\n - `vmware`: VMware vCenter\n - `satellite6`: Red Hat Satellite 6\n - `openstack`: OpenStack\n - `rhv`: Red Hat Virtualization\n - `controller`: Red Hat Ansible Automation Platform\n - `insights`: Red Hat Insights\n* `source_path`: (string)\n* `source_vars`: Inventory source variables in YAML or JSON format. (string)\n* `scm_branch`: Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true. (string)\n* `credential`: Cloud credential to use for inventory updates. (integer)\n* `enabled_var`: Retrieve the enabled state from the given dict of host variables. The enabled variable may be specified as \"foo.bar\", in which case the lookup will traverse into nested dicts, equivalent to: from_dict.get(\"foo\", {}).get(\"bar\", default) (string)\n* `enabled_value`: Only used when enabled_var is set. Value when the host is considered enabled. For example, if enabled_var=\"status.power_state\" and enabled_value=\"powered_on\" with host variables { \"status\": { \"power_state\": \"powered_on\", \"created\": \"2018-02-01T08:00:00.000000Z:00\", \"healthy\": true }, \"name\": \"foobar\", \"ip_address\": \"192.168.2.1\" }, the host would be marked enabled. If power_state were any value other than powered_on, then the host would be disabled when imported. If the key is not found, then the host will be enabled. (string)\n* `host_filter`: This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported. (string)\n* `overwrite`: Overwrite local groups and hosts from remote inventory source. (boolean)\n* `overwrite_vars`: Overwrite local variables from remote inventory source. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use. (string)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `verbosity`: (choice)\n - `0`: 0 (WARNING)\n - `1`: 1 (INFO)\n - `2`: 2 (DEBUG)\n* `limit`: Enter host, group or pattern match. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `none`: No External Source\n* `execution_environment`: The container image to be used for execution. 
(id)\n* `inventory`: (id)\n* `update_on_launch`: (boolean)\n* `update_cache_timeout`: (integer)\n* `source_project`: Project containing inventory file used as source. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that inventory sources are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_scm_inventory_sources_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InventorySource" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/projects/{id}/teams/": { + "get": { + "description": "teams.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of teams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more team records. \n\n## Results\n\nEach team data structure includes the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. (choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. 
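The enabled_var/enabled_value lookup described in the inventory source fields above is easier to see with a worked example. This hypothetical sketch mirrors that logic in shell with jq; jq and the sample host variables are assumptions, not part of the spec.

# "status.power_state" names a nested key in each host's variables;
# the host counts as enabled only when that value equals enabled_value.
hostvars='{"status": {"power_state": "powered_on"}, "name": "foobar"}'
state=$(echo "$hostvars" | jq -r '.status.power_state')
if [ "$state" = "powered_on" ]; then
  echo "host enabled"
else
  echo "host disabled"
fi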
(string)\n* `description`: Optional description of this team. (string)\n* `organization`: (id)\n\n\n\n## Sorting\n\nTo specify that teams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_projects_teams_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Team" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/projects/{id}/update/": { + "get": { + "description": "from its SCM source. The response will include the following field:\n\n* `can_update`: Flag indicating if this project can be updated (boolean,\n read-only)\n\nMake a POST request to this resource to update the project. If the project\ncannot be updated, a 405 status code will be returned.", + "operationId": "api_projects_update_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/ProjectUpdateView" + } + } + }, + "summary": "Make a GET request to this resource to determine if the project can be updated", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "from its SCM source. The response will include the following field:\n\n* `can_update`: Flag indicating if this project can be updated (boolean,\n read-only)\n\nMake a POST request to this resource to update the project. 
If the project\ncannot be updated, a 405 status code will be returned.", + "operationId": "api_projects_update_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/ProjectUpdateView" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/ProjectUpdateView" + } + } + }, + "summary": "Make a GET request to this resource to determine if the project can be updated", + "tags": [ + "api" + ] + } + }, + "/api/v2/roles/": { + "get": { + "description": "roles.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records. \n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
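Putting the two /update/ operations documented above together: a GET reports whether an update can run, and a POST launches it (or returns 405 when it cannot). A sketch, assuming the same placeholder host, token, and project ID as the earlier examples, plus jq for parsing.

# Launch an SCM update for project 1 only if the API reports it can be updated.
can=$(curl -s -H "Authorization: Bearer $TOKEN" \
  "https://controller.example.com/api/v2/projects/1/update/" | jq -r '.can_update')
if [ "$can" = "true" ]; then
  curl -s -X POST -H "Authorization: Bearer $TOKEN" \
    "https://controller.example.com/api/v2/projects/1/update/"
fi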
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_roles_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Role" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/roles/{id}/": { + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)", + "operationId": "api_roles_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Role" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single role", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/roles/{id}/children/": { + "get": { + "description": "roles associated with the selected\nrole.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records. \n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. 
The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_roles_children_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Role" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/roles/{id}/parents/": { + "get": { + "description": "roles associated with the selected\nrole.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records. \n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. 
(field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_roles_parents_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Role" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/roles/{id}/teams/": { + "get": { + "description": "teams associated with the selected\nrole.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of teams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more team records. \n\n## Results\n\nEach team data structure includes the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. (choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. (string)\n* `description`: Optional description of this team. 
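The children/ and parents/ endpoints above let you walk the role hierarchy in either direction. A sketch with a placeholder role ID of 42 and the same assumed host and token:

# Show the ancestors and descendants of role 42 by name.
curl -s -H "Authorization: Bearer $TOKEN" \
  "https://controller.example.com/api/v2/roles/42/parents/" | jq -r '.results[].name'
curl -s -H "Authorization: Bearer $TOKEN" \
  "https://controller.example.com/api/v2/roles/42/children/" | jq -r '.results[].name'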
(string)\n* `organization`: (id)\n\n\n\n## Sorting\n\nTo specify that teams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_roles_teams_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Team" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "teams associated with the selected\nrole.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of teams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more team records. \n\n## Results\n\nEach team data structure includes the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. (choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. (string)\n* `description`: Optional description of this team. 
(string)\n* `organization`: (id)\n\n\n\n## Sorting\n\nTo specify that teams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_roles_teams_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Team" + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Team" + } + }, + "400": { + "examples": { + "application/json": { + "detail": "Team matching query does not exist." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/roles/{id}/users/": { + "get": { + "description": "users associated with the selected\nrole.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. 
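POSTing to a role's teams list associates that team with the role. The {"id": ...} request body follows the controller's usual association convention rather than anything spelled out in this spec, and the IDs are placeholders.

# Grant role 42 to team 7.
curl -s -X POST \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"id": 7}' \
  "https://controller.example.com/api/v2/roles/42/teams/"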
(string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_roles_users_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/User" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "users associated with the selected\nrole.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. 
(string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_roles_users_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/User" + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/User" + } + }, + "400": { + "examples": { + "application/json": { + "msg": "You cannot grant private credential access to another user" + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/schedules/": { + "get": { + "description": "schedules.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of schedules\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more schedule records. \n\n## Results\n\nEach schedule data structure includes the following fields:\n\n* `rrule`: A value representing the schedules iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. 
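The users list above works the same way; adding "disassociate": true reverses a grant. Again, the association convention is assumed rather than documented in this spec, and the IDs are placeholders.

# Revoke role 42 from user 5.
curl -s -X POST \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"id": 5, "disassociate": true}' \
  "https://controller.example.com/api/v2/roles/42/users/"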
(string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time, afterwards the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n## Sorting\n\nTo specify that schedules are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_schedules_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Schedule" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "schedules.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of schedules\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more schedule records. \n\n## Results\n\nEach schedule data structure includes the following fields:\n\n* `rrule`: A value representing the schedules iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. (string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. 
(id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time, afterwards the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n## Sorting\n\nTo specify that schedules are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", +            "operationId": "api_schedules_create", +            "parameters": [ +                { +                    "in": "body", +                    "name": "data", +                    "schema": { +                        "example": { +                            "name": "My Example Schedule", +                            "rrule": "DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1", +                            "unified_job_template": 1 +                        } +                    } +                } +            ], +            "produces": [ +                "application/json" +            ], +            "responses": { +                "201": { +                    "description": "", +                    "examples": { +                        "application/json": { +                            "created": "2018-02-01T08:00:00.000000Z", +                            "description": "", +                            "diff_mode": null, +                            "dtend": "2015-11-17T05:00:00Z", +                            "dtstart": "2015-11-17T05:00:00Z", +                            "enabled": true, +                            "execution_environment": null, +                            "extra_data": {}, +                            "forks": null, +                            "id": 1, +                            "inventory": null, +                            "job_slice_count": null, +                            "job_tags": null, +                            "job_type": null, +                            "limit": null, +                            "modified": "2018-02-01T08:00:00.000000Z", +                            "name": "My Example Schedule", +                            "next_run": null, +                            "related": { +                                "credentials": "/api/v2/schedules/1/credentials/", +                                "instance_groups": "/api/v2/schedules/1/instance_groups/", +                                "inventory": "/api/v2/inventories/1/", +                                "labels": "/api/v2/schedules/1/labels/", +                                "unified_job_template": "/api/v2/inventory_sources/1/", +                                "unified_jobs": "/api/v2/schedules/1/jobs/" +                            }, +                            "rrule": "DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1", +                            "scm_branch": null, +                            "skip_tags": null, +                            "summary_fields": { +                                "inventory": { +                                    "description": "", +                                    "has_active_failures": false, +                                    "has_inventory_sources": true, +                                    "hosts_with_active_failures": 0, +                                    "id": 1, +                                    "inventory_sources_with_failures": 0, +                                    "kind": "", +                                    "name": "test-inv", +                                    "organization_id": 1, +                                    "total_groups": 0, +                                    "total_hosts": 0, +                                    "total_inventory_sources": 1 +                                }, +                                "unified_job_template": { +                                    "description": "", +                                    "id": 1, +                                    "name": "single-inv-src", +                                    
"unified_job_type": "inventory_update" + }, + "user_capabilities": { + "delete": false, + "edit": false + } + }, + "timeout": null, + "timezone": "UTC", + "type": "schedule", + "unified_job_template": 1, + "until": "", + "url": "/api/v2/schedules/1/", + "verbosity": null + } + }, + "schema": { + "$ref": "#/definitions/Schedule" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/schedules/preview/": { + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_schedules_preview_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "rrule": "DTSTART:20301219T130551Z RRULE:FREQ=MONTHLY;INTERVAL=1;BYDAY=SA;BYMONTHDAY=12,13,14,15,16,17,18" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "examples": { + "application/json": { + "local": [ + "2031-01-18T13:05:51Z", + "2031-02-15T13:05:51Z", + "2031-03-15T13:05:51Z", + "2031-04-12T13:05:51Z", + "2031-05-17T13:05:51Z", + "2031-06-14T13:05:51Z", + "2031-07-12T13:05:51Z", + "2031-08-16T13:05:51Z", + "2031-09-13T13:05:51Z", + "2031-10-18T13:05:51Z" + ], + "utc": [ + "2031-01-18T13:05:51Z", + "2031-02-15T13:05:51Z", + "2031-03-15T13:05:51Z", + "2031-04-12T13:05:51Z", + "2031-05-17T13:05:51Z", + "2031-06-14T13:05:51Z", + "2031-07-12T13:05:51Z", + "2031-08-16T13:05:51Z", + "2031-09-13T13:05:51Z", + "2031-10-18T13:05:51Z" + ] + } + } + }, + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/SchedulePreview" + } + } + }, + "summary": "No Description for post on /api/{version}/schedules/preview/", + "tags": [ + "api" + ] + } + }, + "/api/v2/schedules/zoneinfo/": { + "get": { + "description": "", + "operationId": "api_schedules_zoneinfo_list", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "links": { + "Africa/Asmara": "Africa/Addis_Ababa", + "Africa/Asmera": "Africa/Addis_Ababa", + "Africa/Bamako": "Africa/Abidjan", + "Africa/Banjul": "Africa/Abidjan", + "Africa/Brazzaville": "Africa/Bangui", + "Africa/Bujumbura": "Africa/Blantyre", + "Africa/Cairo": "Egypt", + "Africa/Conakry": "Africa/Abidjan", + "Africa/Dakar": "Africa/Abidjan", + "Africa/Dar_es_Salaam": "Africa/Addis_Ababa", + "Africa/Djibouti": "Africa/Addis_Ababa", + "Africa/Douala": "Africa/Bangui", + "Africa/Freetown": "Africa/Abidjan", + "Africa/Gaborone": "Africa/Blantyre", + "Africa/Harare": "Africa/Blantyre", + "Africa/Kampala": "Africa/Addis_Ababa", + "Africa/Kigali": "Africa/Blantyre", + "Africa/Kinshasa": "Africa/Bangui", + "Africa/Lagos": "Africa/Bangui", + "Africa/Libreville": "Africa/Bangui", + "Africa/Lome": "Africa/Abidjan", + "Africa/Luanda": "Africa/Bangui", + "Africa/Lubumbashi": "Africa/Blantyre", + "Africa/Lusaka": "Africa/Blantyre", + "Africa/Malabo": "Africa/Bangui", + "Africa/Maputo": "Africa/Blantyre", + "Africa/Maseru": "Africa/Johannesburg", + "Africa/Mbabane": "Africa/Johannesburg", + "Africa/Mogadishu": "Africa/Addis_Ababa", + "Africa/Nairobi": "Africa/Addis_Ababa", + "Africa/Niamey": "Africa/Bangui", + "Africa/Nouakchott": "Africa/Abidjan", + "Africa/Ouagadougou": "Africa/Abidjan", + "Africa/Porto-Novo": "Africa/Bangui", + "Africa/Timbuktu": "Africa/Abidjan", + 
"America/Adak": "US/Aleutian", + "America/Anchorage": "US/Alaska", + "America/Antigua": "America/Anguilla", + "America/Argentina/ComodRivadavia": "America/Argentina/Catamarca", + "America/Atka": "US/Aleutian", + "America/Buenos_Aires": "America/Argentina/Buenos_Aires", + "America/Catamarca": "America/Argentina/Catamarca", + "America/Chicago": "US/Central", + "America/Coral_Harbour": "America/Atikokan", + "America/Cordoba": "America/Argentina/Cordoba", + "America/Curacao": "America/Aruba", + "America/Denver": "US/Mountain", + "America/Detroit": "US/Michigan", + "America/Dominica": "America/Anguilla", + "America/Edmonton": "Canada/Mountain", + "America/Fort_Wayne": "US/East-Indiana", + "America/Grenada": "America/Anguilla", + "America/Guadeloupe": "America/Anguilla", + "America/Halifax": "Canada/Atlantic", + "America/Havana": "Cuba", + "America/Indiana/Indianapolis": "US/East-Indiana", + "America/Indiana/Knox": "US/Indiana-Starke", + "America/Indianapolis": "US/East-Indiana", + "America/Jamaica": "Jamaica", + "America/Jujuy": "America/Argentina/Jujuy", + "America/Knox_IN": "US/Indiana-Starke", + "America/Kralendijk": "America/Aruba", + "America/Los_Angeles": "US/Pacific", + "America/Louisville": "America/Kentucky/Louisville", + "America/Lower_Princes": "America/Aruba", + "America/Marigot": "America/Anguilla", + "America/Mendoza": "America/Argentina/Mendoza", + "America/Montreal": "Canada/Eastern", + "America/Montserrat": "America/Anguilla", + "America/New_York": "US/Eastern", + "America/Nuuk": "America/Godthab", + "America/Panama": "America/Cayman", + "America/Phoenix": "US/Arizona", + "America/Port_of_Spain": "America/Anguilla", + "America/Regina": "Canada/Saskatchewan", + "America/Rio_Branco": "America/Porto_Acre", + "America/Rosario": "America/Argentina/Cordoba", + "America/Santa_Isabel": "America/Ensenada", + "America/Shiprock": "US/Mountain", + "America/St_Barthelemy": "America/Anguilla", + "America/St_Johns": "Canada/Newfoundland", + "America/St_Kitts": "America/Anguilla", + "America/St_Lucia": "America/Anguilla", + "America/St_Thomas": "America/Anguilla", + "America/St_Vincent": "America/Anguilla", + "America/Tijuana": "America/Ensenada", + "America/Toronto": "Canada/Eastern", + "America/Tortola": "America/Anguilla", + "America/Vancouver": "Canada/Pacific", + "America/Virgin": "America/Anguilla", + "America/Whitehorse": "Canada/Yukon", + "America/Winnipeg": "Canada/Central", + "Antarctica/McMurdo": "NZ", + "Antarctica/South_Pole": "NZ", + "Asia/Ashkhabad": "Asia/Ashgabat", + "Asia/Chongqing": "PRC", + "Asia/Chungking": "PRC", + "Asia/Dhaka": "Asia/Dacca", + "Asia/Harbin": "PRC", + "Asia/Hong_Kong": "Hongkong", + "Asia/Istanbul": "Europe/Istanbul", + "Asia/Jerusalem": "Israel", + "Asia/Katmandu": "Asia/Kathmandu", + "Asia/Kolkata": "Asia/Calcutta", + "Asia/Kuwait": "Asia/Aden", + "Asia/Macau": "Asia/Macao", + "Asia/Muscat": "Asia/Dubai", + "Asia/Nicosia": "Europe/Nicosia", + "Asia/Phnom_Penh": "Asia/Bangkok", + "Asia/Qatar": "Asia/Bahrain", + "Asia/Riyadh": "Asia/Aden", + "Asia/Saigon": "Asia/Ho_Chi_Minh", + "Asia/Seoul": "ROK", + "Asia/Shanghai": "PRC", + "Asia/Singapore": "Singapore", + "Asia/Taipei": "ROC", + "Asia/Tel_Aviv": "Israel", + "Asia/Thimphu": "Asia/Thimbu", + "Asia/Tokyo": "Japan", + "Asia/Ujung_Pandang": "Asia/Makassar", + "Asia/Ulan_Bator": "Asia/Ulaanbaatar", + "Asia/Urumqi": "Asia/Kashgar", + "Asia/Vientiane": "Asia/Bangkok", + "Asia/Yangon": "Asia/Rangoon", + "Atlantic/Faroe": "Atlantic/Faeroe", + "Atlantic/Jan_Mayen": "Arctic/Longyearbyen", + "Atlantic/Reykjavik": 
"Iceland", + "Atlantic/St_Helena": "Africa/Abidjan", + "Australia/Canberra": "Australia/ACT", + "Australia/Hobart": "Australia/Currie", + "Australia/Lord_Howe": "Australia/LHI", + "Australia/NSW": "Australia/ACT", + "Australia/North": "Australia/Darwin", + "Australia/Queensland": "Australia/Brisbane", + "Australia/South": "Australia/Adelaide", + "Australia/Sydney": "Australia/ACT", + "Australia/Tasmania": "Australia/Currie", + "Australia/Victoria": "Australia/Melbourne", + "Australia/West": "Australia/Perth", + "Australia/Yancowinna": "Australia/Broken_Hill", + "Brazil/Acre": "America/Porto_Acre", + "Brazil/DeNoronha": "America/Noronha", + "Brazil/East": "America/Sao_Paulo", + "Brazil/West": "America/Manaus", + "Chile/Continental": "America/Santiago", + "Etc/GMT": "GMT-0", + "Etc/GMT+0": "GMT-0", + "Etc/GMT-0": "GMT-0", + "Etc/GMT0": "GMT-0", + "Etc/Greenwich": "GMT-0", + "Etc/UTC": "Etc/UCT", + "Etc/Universal": "Etc/UCT", + "Etc/Zulu": "Etc/UCT", + "Europe/Dublin": "Eire", + "Europe/Guernsey": "Europe/Belfast", + "Europe/Isle_of_Man": "Europe/Belfast", + "Europe/Jersey": "Europe/Belfast", + "Europe/Ljubljana": "Europe/Belgrade", + "Europe/London": "Europe/Belfast", + "Europe/Mariehamn": "Europe/Helsinki", + "Europe/Oslo": "Arctic/Longyearbyen", + "Europe/Podgorica": "Europe/Belgrade", + "Europe/Prague": "Europe/Bratislava", + "Europe/San_Marino": "Europe/Rome", + "Europe/Sarajevo": "Europe/Belgrade", + "Europe/Skopje": "Europe/Belgrade", + "Europe/Tiraspol": "Europe/Chisinau", + "Europe/Vaduz": "Europe/Busingen", + "Europe/Vatican": "Europe/Rome", + "Europe/Zagreb": "Europe/Belgrade", + "Europe/Zurich": "Europe/Busingen", + "GB": "Europe/Belfast", + "GB-Eire": "Europe/Belfast", + "GMT": "GMT-0", + "GMT+0": "GMT-0", + "GMT0": "GMT-0", + "Greenwich": "GMT-0", + "Indian/Antananarivo": "Africa/Addis_Ababa", + "Indian/Comoro": "Africa/Addis_Ababa", + "Indian/Mayotte": "Africa/Addis_Ababa", + "Iran": "Asia/Tehran", + "Libya": "Africa/Tripoli", + "Mexico/BajaNorte": "America/Ensenada", + "Mexico/BajaSur": "America/Mazatlan", + "Mexico/General": "America/Mexico_City", + "Navajo": "US/Mountain", + "Pacific/Auckland": "NZ", + "Pacific/Chatham": "NZ-CHAT", + "Pacific/Easter": "Chile/EasterIsland", + "Pacific/Honolulu": "US/Hawaii", + "Pacific/Johnston": "US/Hawaii", + "Pacific/Kwajalein": "Kwajalein", + "Pacific/Midway": "US/Samoa", + "Pacific/Pago_Pago": "US/Samoa", + "Pacific/Ponape": "Pacific/Pohnpei", + "Pacific/Saipan": "Pacific/Guam", + "Pacific/Samoa": "US/Samoa", + "Pacific/Truk": "Pacific/Chuuk", + "Pacific/Yap": "Pacific/Chuuk", + "Poland": "Europe/Warsaw", + "Portugal": "Europe/Lisbon", + "Turkey": "Europe/Istanbul", + "UCT": "Etc/UCT", + "UTC": "Etc/UCT", + "Universal": "Etc/UCT", + "W-SU": "Europe/Moscow", + "Zulu": "Etc/UCT" + }, + "zones": [ + "Africa/Abidjan", + "Africa/Accra", + "Africa/Addis_Ababa", + "Africa/Algiers", + "Africa/Asmara", + "Africa/Asmera", + "Africa/Bamako", + "Africa/Bangui", + "Africa/Banjul", + "Africa/Bissau", + "Africa/Blantyre", + "Africa/Brazzaville", + "Africa/Bujumbura", + "Africa/Cairo", + "Africa/Casablanca", + "Africa/Ceuta", + "Africa/Conakry", + "Africa/Dakar", + "Africa/Dar_es_Salaam", + "Africa/Djibouti", + "Africa/Douala", + "Africa/El_Aaiun", + "Africa/Freetown", + "Africa/Gaborone", + "Africa/Harare", + "Africa/Johannesburg", + "Africa/Juba", + "Africa/Kampala", + "Africa/Khartoum", + "Africa/Kigali", + "Africa/Kinshasa", + "Africa/Lagos", + "Africa/Libreville", + "Africa/Lome", + "Africa/Luanda", + "Africa/Lubumbashi", + "Africa/Lusaka", + 
"Africa/Malabo", + "Africa/Maputo", + "Africa/Maseru", + "Africa/Mbabane", + "Africa/Mogadishu", + "Africa/Monrovia", + "Africa/Nairobi", + "Africa/Ndjamena", + "Africa/Niamey", + "Africa/Nouakchott", + "Africa/Ouagadougou", + "Africa/Porto-Novo", + "Africa/Sao_Tome", + "Africa/Timbuktu", + "Africa/Tripoli", + "Africa/Tunis", + "Africa/Windhoek", + "America/Adak", + "America/Anchorage", + "America/Anguilla", + "America/Antigua", + "America/Araguaina", + "America/Argentina/Buenos_Aires", + "America/Argentina/Catamarca", + "America/Argentina/ComodRivadavia", + "America/Argentina/Cordoba", + "America/Argentina/Jujuy", + "America/Argentina/La_Rioja", + "America/Argentina/Mendoza", + "America/Argentina/Rio_Gallegos", + "America/Argentina/Salta", + "America/Argentina/San_Juan", + "America/Argentina/San_Luis", + "America/Argentina/Tucuman", + "America/Argentina/Ushuaia", + "America/Aruba", + "America/Asuncion", + "America/Atikokan", + "America/Atka", + "America/Bahia", + "America/Bahia_Banderas", + "America/Barbados", + "America/Belem", + "America/Belize", + "America/Blanc-Sablon", + "America/Boa_Vista", + "America/Bogota", + "America/Boise", + "America/Buenos_Aires", + "America/Cambridge_Bay", + "America/Campo_Grande", + "America/Cancun", + "America/Caracas", + "America/Catamarca", + "America/Cayenne", + "America/Cayman", + "America/Chicago", + "America/Chihuahua", + "America/Coral_Harbour", + "America/Cordoba", + "America/Costa_Rica", + "America/Creston", + "America/Cuiaba", + "America/Curacao", + "America/Danmarkshavn", + "America/Dawson", + "America/Dawson_Creek", + "America/Denver", + "America/Detroit", + "America/Dominica", + "America/Edmonton", + "America/Eirunepe", + "America/El_Salvador", + "America/Ensenada", + "America/Fort_Nelson", + "America/Fort_Wayne", + "America/Fortaleza", + "America/Glace_Bay", + "America/Godthab", + "America/Goose_Bay", + "America/Grand_Turk", + "America/Grenada", + "America/Guadeloupe", + "America/Guatemala", + "America/Guayaquil", + "America/Guyana", + "America/Halifax", + "America/Havana", + "America/Hermosillo", + "America/Indiana/Indianapolis", + "America/Indiana/Knox", + "America/Indiana/Marengo", + "America/Indiana/Petersburg", + "America/Indiana/Tell_City", + "America/Indiana/Vevay", + "America/Indiana/Vincennes", + "America/Indiana/Winamac", + "America/Indianapolis", + "America/Inuvik", + "America/Iqaluit", + "America/Jamaica", + "America/Jujuy", + "America/Juneau", + "America/Kentucky/Louisville", + "America/Kentucky/Monticello", + "America/Knox_IN", + "America/Kralendijk", + "America/La_Paz", + "America/Lima", + "America/Los_Angeles", + "America/Louisville", + "America/Lower_Princes", + "America/Maceio", + "America/Managua", + "America/Manaus", + "America/Marigot", + "America/Martinique", + "America/Matamoros", + "America/Mazatlan", + "America/Mendoza", + "America/Menominee", + "America/Merida", + "America/Metlakatla", + "America/Mexico_City", + "America/Miquelon", + "America/Moncton", + "America/Monterrey", + "America/Montevideo", + "America/Montreal", + "America/Montserrat", + "America/Nassau", + "America/New_York", + "America/Nipigon", + "America/Nome", + "America/Noronha", + "America/North_Dakota/Beulah", + "America/North_Dakota/Center", + "America/North_Dakota/New_Salem", + "America/Nuuk", + "America/Ojinaga", + "America/Panama", + "America/Pangnirtung", + "America/Paramaribo", + "America/Phoenix", + "America/Port-au-Prince", + "America/Port_of_Spain", + "America/Porto_Acre", + "America/Porto_Velho", + "America/Puerto_Rico", + 
"America/Punta_Arenas", + "America/Rainy_River", + "America/Rankin_Inlet", + "America/Recife", + "America/Regina", + "America/Resolute", + "America/Rio_Branco", + "America/Rosario", + "America/Santa_Isabel", + "America/Santarem", + "America/Santiago", + "America/Santo_Domingo", + "America/Sao_Paulo", + "America/Scoresbysund", + "America/Shiprock", + "America/Sitka", + "America/St_Barthelemy", + "America/St_Johns", + "America/St_Kitts", + "America/St_Lucia", + "America/St_Thomas", + "America/St_Vincent", + "America/Swift_Current", + "America/Tegucigalpa", + "America/Thule", + "America/Thunder_Bay", + "America/Tijuana", + "America/Toronto", + "America/Tortola", + "America/Vancouver", + "America/Virgin", + "America/Whitehorse", + "America/Winnipeg", + "America/Yakutat", + "America/Yellowknife", + "Antarctica/Casey", + "Antarctica/Davis", + "Antarctica/DumontDUrville", + "Antarctica/Macquarie", + "Antarctica/Mawson", + "Antarctica/McMurdo", + "Antarctica/Palmer", + "Antarctica/Rothera", + "Antarctica/South_Pole", + "Antarctica/Syowa", + "Antarctica/Troll", + "Antarctica/Vostok", + "Arctic/Longyearbyen", + "Asia/Aden", + "Asia/Almaty", + "Asia/Amman", + "Asia/Anadyr", + "Asia/Aqtau", + "Asia/Aqtobe", + "Asia/Ashgabat", + "Asia/Ashkhabad", + "Asia/Atyrau", + "Asia/Baghdad", + "Asia/Bahrain", + "Asia/Baku", + "Asia/Bangkok", + "Asia/Barnaul", + "Asia/Beirut", + "Asia/Bishkek", + "Asia/Brunei", + "Asia/Calcutta", + "Asia/Chita", + "Asia/Choibalsan", + "Asia/Chongqing", + "Asia/Chungking", + "Asia/Colombo", + "Asia/Dacca", + "Asia/Damascus", + "Asia/Dhaka", + "Asia/Dili", + "Asia/Dubai", + "Asia/Dushanbe", + "Asia/Famagusta", + "Asia/Gaza", + "Asia/Hanoi", + "Asia/Harbin", + "Asia/Hebron", + "Asia/Ho_Chi_Minh", + "Asia/Hong_Kong", + "Asia/Hovd", + "Asia/Irkutsk", + "Asia/Istanbul", + "Asia/Jakarta", + "Asia/Jayapura", + "Asia/Jerusalem", + "Asia/Kabul", + "Asia/Kamchatka", + "Asia/Karachi", + "Asia/Kashgar", + "Asia/Kathmandu", + "Asia/Katmandu", + "Asia/Khandyga", + "Asia/Kolkata", + "Asia/Krasnoyarsk", + "Asia/Kuala_Lumpur", + "Asia/Kuching", + "Asia/Kuwait", + "Asia/Macao", + "Asia/Macau", + "Asia/Magadan", + "Asia/Makassar", + "Asia/Manila", + "Asia/Muscat", + "Asia/Nicosia", + "Asia/Novokuznetsk", + "Asia/Novosibirsk", + "Asia/Omsk", + "Asia/Oral", + "Asia/Phnom_Penh", + "Asia/Pontianak", + "Asia/Pyongyang", + "Asia/Qatar", + "Asia/Qostanay", + "Asia/Qyzylorda", + "Asia/Rangoon", + "Asia/Riyadh", + "Asia/Saigon", + "Asia/Sakhalin", + "Asia/Samarkand", + "Asia/Seoul", + "Asia/Shanghai", + "Asia/Singapore", + "Asia/Srednekolymsk", + "Asia/Taipei", + "Asia/Tashkent", + "Asia/Tbilisi", + "Asia/Tehran", + "Asia/Tel_Aviv", + "Asia/Thimbu", + "Asia/Thimphu", + "Asia/Tokyo", + "Asia/Tomsk", + "Asia/Ujung_Pandang", + "Asia/Ulaanbaatar", + "Asia/Ulan_Bator", + "Asia/Urumqi", + "Asia/Ust-Nera", + "Asia/Vientiane", + "Asia/Vladivostok", + "Asia/Yakutsk", + "Asia/Yangon", + "Asia/Yekaterinburg", + "Asia/Yerevan", + "Atlantic/Azores", + "Atlantic/Bermuda", + "Atlantic/Canary", + "Atlantic/Cape_Verde", + "Atlantic/Faeroe", + "Atlantic/Faroe", + "Atlantic/Jan_Mayen", + "Atlantic/Madeira", + "Atlantic/Reykjavik", + "Atlantic/South_Georgia", + "Atlantic/St_Helena", + "Atlantic/Stanley", + "Australia/ACT", + "Australia/Adelaide", + "Australia/Brisbane", + "Australia/Broken_Hill", + "Australia/Canberra", + "Australia/Currie", + "Australia/Darwin", + "Australia/Eucla", + "Australia/Hobart", + "Australia/LHI", + "Australia/Lindeman", + "Australia/Lord_Howe", + "Australia/Melbourne", + "Australia/NSW", + 
"Australia/North", + "Australia/Perth", + "Australia/Queensland", + "Australia/South", + "Australia/Sydney", + "Australia/Tasmania", + "Australia/Victoria", + "Australia/West", + "Australia/Yancowinna", + "Brazil/Acre", + "Brazil/DeNoronha", + "Brazil/East", + "Brazil/West", + "CET", + "CST6CDT", + "Canada/Atlantic", + "Canada/Central", + "Canada/Eastern", + "Canada/Mountain", + "Canada/Newfoundland", + "Canada/Pacific", + "Canada/Saskatchewan", + "Canada/Yukon", + "Chile/Continental", + "Chile/EasterIsland", + "Cuba", + "EET", + "EST", + "EST5EDT", + "Egypt", + "Eire", + "Etc/GMT", + "Etc/GMT+0", + "Etc/GMT+1", + "Etc/GMT+10", + "Etc/GMT+11", + "Etc/GMT+12", + "Etc/GMT+2", + "Etc/GMT+3", + "Etc/GMT+4", + "Etc/GMT+5", + "Etc/GMT+6", + "Etc/GMT+7", + "Etc/GMT+8", + "Etc/GMT+9", + "Etc/GMT-0", + "Etc/GMT-1", + "Etc/GMT-10", + "Etc/GMT-11", + "Etc/GMT-12", + "Etc/GMT-13", + "Etc/GMT-14", + "Etc/GMT-2", + "Etc/GMT-3", + "Etc/GMT-4", + "Etc/GMT-5", + "Etc/GMT-6", + "Etc/GMT-7", + "Etc/GMT-8", + "Etc/GMT-9", + "Etc/GMT0", + "Etc/Greenwich", + "Etc/UCT", + "Etc/UTC", + "Etc/Universal", + "Etc/Zulu", + "Europe/Amsterdam", + "Europe/Andorra", + "Europe/Astrakhan", + "Europe/Athens", + "Europe/Belfast", + "Europe/Belgrade", + "Europe/Berlin", + "Europe/Bratislava", + "Europe/Brussels", + "Europe/Bucharest", + "Europe/Budapest", + "Europe/Busingen", + "Europe/Chisinau", + "Europe/Copenhagen", + "Europe/Dublin", + "Europe/Gibraltar", + "Europe/Guernsey", + "Europe/Helsinki", + "Europe/Isle_of_Man", + "Europe/Istanbul", + "Europe/Jersey", + "Europe/Kaliningrad", + "Europe/Kiev", + "Europe/Kirov", + "Europe/Lisbon", + "Europe/Ljubljana", + "Europe/London", + "Europe/Luxembourg", + "Europe/Madrid", + "Europe/Malta", + "Europe/Mariehamn", + "Europe/Minsk", + "Europe/Monaco", + "Europe/Moscow", + "Europe/Nicosia", + "Europe/Oslo", + "Europe/Paris", + "Europe/Podgorica", + "Europe/Prague", + "Europe/Riga", + "Europe/Rome", + "Europe/Samara", + "Europe/San_Marino", + "Europe/Sarajevo", + "Europe/Saratov", + "Europe/Simferopol", + "Europe/Skopje", + "Europe/Sofia", + "Europe/Stockholm", + "Europe/Tallinn", + "Europe/Tirane", + "Europe/Tiraspol", + "Europe/Ulyanovsk", + "Europe/Uzhgorod", + "Europe/Vaduz", + "Europe/Vatican", + "Europe/Vienna", + "Europe/Vilnius", + "Europe/Volgograd", + "Europe/Warsaw", + "Europe/Zagreb", + "Europe/Zaporozhye", + "Europe/Zurich", + "Factory", + "GB", + "GB-Eire", + "GMT", + "GMT+0", + "GMT-0", + "GMT0", + "Greenwich", + "HST", + "Hongkong", + "Iceland", + "Indian/Antananarivo", + "Indian/Chagos", + "Indian/Christmas", + "Indian/Cocos", + "Indian/Comoro", + "Indian/Kerguelen", + "Indian/Mahe", + "Indian/Maldives", + "Indian/Mauritius", + "Indian/Mayotte", + "Indian/Reunion", + "Iran", + "Israel", + "Jamaica", + "Japan", + "Kwajalein", + "Libya", + "MET", + "MST", + "MST7MDT", + "Mexico/BajaNorte", + "Mexico/BajaSur", + "Mexico/General", + "NZ", + "NZ-CHAT", + "Navajo", + "PRC", + "PST8PDT", + "Pacific/Apia", + "Pacific/Auckland", + "Pacific/Bougainville", + "Pacific/Chatham", + "Pacific/Chuuk", + "Pacific/Easter", + "Pacific/Efate", + "Pacific/Enderbury", + "Pacific/Fakaofo", + "Pacific/Fiji", + "Pacific/Funafuti", + "Pacific/Galapagos", + "Pacific/Gambier", + "Pacific/Guadalcanal", + "Pacific/Guam", + "Pacific/Honolulu", + "Pacific/Johnston", + "Pacific/Kiritimati", + "Pacific/Kosrae", + "Pacific/Kwajalein", + "Pacific/Majuro", + "Pacific/Marquesas", + "Pacific/Midway", + "Pacific/Nauru", + "Pacific/Niue", + "Pacific/Norfolk", + "Pacific/Noumea", + "Pacific/Pago_Pago", + 
"Pacific/Palau", + "Pacific/Pitcairn", + "Pacific/Pohnpei", + "Pacific/Ponape", + "Pacific/Port_Moresby", + "Pacific/Rarotonga", + "Pacific/Saipan", + "Pacific/Samoa", + "Pacific/Tahiti", + "Pacific/Tarawa", + "Pacific/Tongatapu", + "Pacific/Truk", + "Pacific/Wake", + "Pacific/Wallis", + "Pacific/Yap", + "Poland", + "Portugal", + "ROC", + "ROK", + "Singapore", + "Turkey", + "UCT", + "US/Alaska", + "US/Aleutian", + "US/Arizona", + "US/Central", + "US/East-Indiana", + "US/Eastern", + "US/Hawaii", + "US/Indiana-Starke", + "US/Michigan", + "US/Mountain", + "US/Pacific", + "US/Samoa", + "UTC", + "Universal", + "W-SU", + "WET", + "Zulu" + ] + } + } + } + }, + "summary": "No Description for get on /api/{version}/schedules/zoneinfo/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/schedules/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `rrule`: A value representing the schedules iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. (string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time, aftewards the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end an empty string will be returned (field)\n\n\n\n\n\n# Update a Schedule:\n\nMake a PUT or PATCH request to this resource to update this\nschedule. The following fields may be modified:\n\n\n* `rrule`: A value representing the schedules iCal recurrence rule. (string, required)\n\n\n\n\n\n\n\n* `name`: Name of this schedule. (string, required)\n* `description`: Optional description of this schedule. 
(string, default=`\"\"`)\n* `extra_data`: (json, default=`{}`)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id, default=``)\n* `scm_branch`: (string, default=`\"\"`)\n* `job_type`: (choice)\n - `None`: --------- (default)\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string, default=`\"\"`)\n* `skip_tags`: (string, default=`\"\"`)\n* `limit`: (string, default=`\"\"`)\n* `diff_mode`: (boolean, default=`None`)\n* `verbosity`: (choice)\n - `None`: --------- (default)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `forks`: (integer, default=`None`)\n* `job_slice_count`: (integer, default=`None`)\n* `timeout`: (integer, default=`None`)\n* `unified_job_template`: (id, required)\n* `enabled`: Enables processing of this schedule. (boolean, default=`True`)\n\n\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Schedule:\n\nMake a DELETE request to this resource to delete this schedule.", + "operationId": "api_schedules_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single schedule", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. (string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time, afterwards the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. 
(field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n\n\n# Update a Schedule:\n\nMake a PUT or PATCH request to this resource to update this\nschedule. The following fields may be modified:\n\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string, required)\n\n\n\n\n\n\n\n* `name`: Name of this schedule. (string, required)\n* `description`: Optional description of this schedule. (string, default=`\"\"`)\n* `extra_data`: (json, default=`{}`)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id, default=``)\n* `scm_branch`: (string, default=`\"\"`)\n* `job_type`: (choice)\n - `None`: --------- (default)\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string, default=`\"\"`)\n* `skip_tags`: (string, default=`\"\"`)\n* `limit`: (string, default=`\"\"`)\n* `diff_mode`: (boolean, default=`None`)\n* `verbosity`: (choice)\n - `None`: --------- (default)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `forks`: (integer, default=`None`)\n* `job_slice_count`: (integer, default=`None`)\n* `timeout`: (integer, default=`None`)\n* `unified_job_template`: (id, required)\n* `enabled`: Enables processing of this schedule. (boolean, default=`True`)\n\n\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Schedule:\n\nMake a DELETE request to this resource to delete this schedule.", + "operationId": "api_schedules_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Schedule" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single schedule", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. 
(string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time, afterwards the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n\n\n# Update a Schedule:\n\nMake a PUT or PATCH request to this resource to update this\nschedule. The following fields may be modified:\n\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string, required)\n\n\n\n\n\n\n\n* `name`: Name of this schedule. (string, required)\n* `description`: Optional description of this schedule. (string, default=`\"\"`)\n* `extra_data`: (json, default=`{}`)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id, default=``)\n* `scm_branch`: (string, default=`\"\"`)\n* `job_type`: (choice)\n - `None`: --------- (default)\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string, default=`\"\"`)\n* `skip_tags`: (string, default=`\"\"`)\n* `limit`: (string, default=`\"\"`)\n* `diff_mode`: (boolean, default=`None`)\n* `verbosity`: (choice)\n - `None`: --------- (default)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `forks`: (integer, default=`None`)\n* `job_slice_count`: (integer, default=`None`)\n* `timeout`: (integer, default=`None`)\n* `unified_job_template`: (id, required)\n* `enabled`: Enables processing of this schedule. 
(boolean, default=`True`)\n\n\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Schedule:\n\nMake a DELETE request to this resource to delete this schedule.", + "operationId": "api_schedules_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "enabled": true + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "diff_mode": null, + "dtend": "2015-11-17T05:00:00Z", + "dtstart": "2015-11-17T05:00:00Z", + "enabled": true, + "execution_environment": null, + "extra_data": {}, + "forks": null, + "id": 1, + "inventory": null, + "job_slice_count": null, + "job_tags": null, + "job_type": null, + "limit": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "test sch", + "next_run": null, + "related": { + "credentials": "/api/v2/schedules/1/credentials/", + "instance_groups": "/api/v2/schedules/1/instance_groups/", + "inventory": "/api/v2/inventories/1/", + "labels": "/api/v2/schedules/1/labels/", + "project": "/api/v2/projects/1/", + "unified_job_template": "/api/v2/job_templates/2/", + "unified_jobs": "/api/v2/schedules/1/jobs/" + }, + "rrule": "DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1", + "scm_branch": null, + "skip_tags": null, + "summary_fields": { + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "unified_job_template": { + "description": "", + "id": 2, + "name": "test-jt", + "unified_job_type": "job" + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "timeout": null, + "timezone": "UTC", + "type": "schedule", + "unified_job_template": 2, + "until": "", + "url": "/api/v2/schedules/1/", + "verbosity": null + } + }, + "schema": { + "$ref": "#/definitions/Schedule" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single schedule", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. 
(string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time, afterwards the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n\n\n# Update a Schedule:\n\nMake a PUT or PATCH request to this resource to update this\nschedule. The following fields may be modified:\n\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string, required)\n\n\n\n\n\n\n\n* `name`: Name of this schedule. (string, required)\n* `description`: Optional description of this schedule. (string, default=`\"\"`)\n* `extra_data`: (json, default=`{}`)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id, default=``)\n* `scm_branch`: (string, default=`\"\"`)\n* `job_type`: (choice)\n - `None`: --------- (default)\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string, default=`\"\"`)\n* `skip_tags`: (string, default=`\"\"`)\n* `limit`: (string, default=`\"\"`)\n* `diff_mode`: (boolean, default=`None`)\n* `verbosity`: (choice)\n - `None`: --------- (default)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `forks`: (integer, default=`None`)\n* `job_slice_count`: (integer, default=`None`)\n* `timeout`: (integer, default=`None`)\n* `unified_job_template`: (id, required)\n* `enabled`: Enables processing of this schedule. 
(boolean, default=`True`)\n\n\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Schedule:\n\nMake a DELETE request to this resource to delete this schedule.", + "operationId": "api_schedules_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Schedule" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Schedule" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single schedule", + "tags": [ + "api" + ] + } + }, + "/api/v2/schedules/{id}/credentials/": { + "get": { + "description": "credentials associated with the selected\nschedule.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_schedules_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Credential" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credentials associated with the selected\nschedule.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. 
(json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_schedules_credentials_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Credential" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Credential" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/schedules/{id}/instance_groups/": { + "get": { + "description": "instance groups associated with the selected\nschedule.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated Openshift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. 
(integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_schedules_instance_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InstanceGroup" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "instance groups associated with the selected\nschedule.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. 
(datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated Openshift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_schedules_instance_groups_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/schedules/{id}/jobs/": { + "get": { + "description": "unified jobs associated with the selected\nschedule.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of unified jobs\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more unified job records. \n\n## Results\n\nEach unified job data structure includes the following fields:\n\n* `id`: Database ID for this unified job. (integer)\n* `type`: Data type for this unified job. (choice)\n* `url`: URL for this unified job. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this unified job was created. (datetime)\n* `modified`: Timestamp when this unified job was last modified. (datetime)\n* `name`: Name of this unified job. (string)\n* `description`: Optional description of this unified job. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n\n\n\n## Sorting\n\nTo specify that unified jobs are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_schedules_jobs_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/UnifiedJobList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/schedules/{id}/labels/": { + "get": { + "description": "labels associated with the selected\nschedule.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_schedules_labels_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Label" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "labels associated with the selected\nschedule.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n    ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n    ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n    ?related__search=findme", + "operationId": "api_schedules_labels_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Label" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Label" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/settings/": { + "get": { + "description": "settings.\n\nThe resulting data structure contains:\n\n    {\n        \"count\": 99,\n        \"next\": null,\n        \"previous\": null,\n        \"results\": [\n            ...\n        ]\n    }\n\nThe `count` field indicates the total number of settings\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more setting records. \n\n## Results\n\nEach setting data structure includes the following fields:\n\n* `url`: (string)\n* `slug`: (string)\n* `name`: (string)\n\n\n\n## Sorting\n\nTo specify that settings are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n    ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n    ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n    ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n    ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n    ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n    ?related__search=findme", + "operationId": "api_settings_list", + "parameters": [ + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/SettingCategory" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/settings/logging/test/": { + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_settings_logging_test_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/SettingSingleton" + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/SettingSingleton" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + }, + "409": { + "examples": { + "application/json": { + "error": "Logging not enabled" + } + } + } + }, + "summary": "Test Logging Configuration", + "tags": [ + "api" + ] + } + }, + "/api/v2/settings/{category_slug}/": { + "delete": { + "description": "record containing the following fields:\n\n* `ACTIVITY_STREAM_ENABLED`: Enable capturing activity for the activity stream. (boolean)\n* `ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC`: Enable capturing activity for the activity stream when running inventory sync. (boolean)\n* `ORG_ADMINS_CAN_SEE_ALL_USERS`: Controls whether any Organization Admin can view all users and teams, even those not associated with their Organization. (boolean)\n* `MANAGE_ORGANIZATION_AUTH`: Controls whether any Organization Admin has the privileges to create and manage users and teams. You may want to disable this ability if you are using an LDAP or SAML integration. (boolean)\n* `TOWER_URL_BASE`: This setting is used by services like notifications to render a valid url to the service. (string)\n* `REMOTE_HOST_HEADERS`: HTTP headers and meta keys to search to determine remote host name or IP. Add additional items to this list, such as "HTTP_X_FORWARDED_FOR", if behind a reverse proxy. See the "Proxy Support" section of the AAP Installation guide for more details. 
(list)\n* `PROXY_IP_ALLOWED_LIST`: If the service is behind a reverse proxy/load balancer, use this setting to configure the proxy IP addresses from which the service should trust custom REMOTE_HOST_HEADERS header values. If this setting is an empty list (the default), the headers specified by REMOTE_HOST_HEADERS will be trusted unconditionally. (list)\n* `CSRF_TRUSTED_ORIGINS`: If the service is behind a reverse proxy/load balancer, use this setting to configure the schema://addresses from which the service should trust Origin header values. (list)\n* `LICENSE`: The license controls which features and functionality are enabled. Use /api/v2/config/ to update or change the license. (nested object)\n* `REDHAT_USERNAME`: This username is used to send data to Automation Analytics (string)\n* `REDHAT_PASSWORD`: This password is used to send data to Automation Analytics (string)\n* `SUBSCRIPTIONS_USERNAME`: This username is used to retrieve subscription and content information (string)\n* `SUBSCRIPTIONS_PASSWORD`: This password is used to retrieve subscription and content information (string)\n* `AUTOMATION_ANALYTICS_URL`: This setting is used to configure the upload URL for data collection for Automation Analytics. (string)\n* `INSTALL_UUID`: (string)\n* `DEFAULT_CONTROL_PLANE_QUEUE_NAME`: (string)\n* `DEFAULT_EXECUTION_QUEUE_NAME`: (string)\n* `DEFAULT_EXECUTION_ENVIRONMENT`: The Execution Environment to be used when one has not been configured for a job template. (field)\n* `CUSTOM_VENV_PATHS`: Paths where Tower will look for custom virtual environments (in addition to /var/lib/awx/venv/). Enter one path per line. (list)\n* `AD_HOC_COMMANDS`: List of modules allowed to be used by ad-hoc jobs. (list)\n* `ALLOW_JINJA_IN_EXTRA_VARS`: Ansible allows variable substitution via the Jinja2 templating language for --extra-vars. This poses a potential security risk where users with the ability to specify extra vars at job launch time can use Jinja2 templates to run arbitrary Python. It is recommended that this value be set to "template" or "never". (choice)\n - `always`: Always\n - `never`: Never\n - `template`: Only On Job Template Definitions\n* `AWX_ISOLATION_BASE_PATH`: The directory in which the service will create new temporary directories for job execution and isolation (such as credential files). (string)\n* `AWX_ISOLATION_SHOW_PATHS`: List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. Volumes will be mounted from the execution node to the container. The supported format is HOST-DIR[:CONTAINER-DIR[:OPTIONS]]. (list)\n* `AWX_TASK_ENV`: Additional environment variables set for playbook runs, inventory updates, project updates, and notification sending. (nested object)\n* `AWX_RUNNER_KEEPALIVE_SECONDS`: Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open. (integer)\n* `GALAXY_TASK_ENV`: Additional environment variables set for invocations of ansible-galaxy within project updates. Useful if you must use a proxy server for ansible-galaxy but not git. (nested object)\n* `INSIGHTS_TRACKING_STATE`: Enables the service to gather data on automation and send it to Automation Analytics. (boolean)\n* `PROJECT_UPDATE_VVV`: Adds the CLI -vvv flag to ansible-playbook runs of project_update.yml used for project updates. (boolean)\n* `AWX_ROLES_ENABLED`: Allows roles to be dynamically downloaded from a requirements.yml file for SCM projects. 
(boolean)\n* `AWX_COLLECTIONS_ENABLED`: Allows collections to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean)\n* `AWX_SHOW_PLAYBOOK_LINKS`: Follow symbolic links when scanning for playbooks. Be aware that setting this to True can lead to infinite recursion if a link points to a parent directory of itself. (boolean)\n* `AWX_MOUNT_ISOLATED_PATHS_ON_K8S`: Expose paths via hostPath for the Pods created by a Container Group. HostPath volumes present many security risks, and it is a best practice to avoid the use of HostPaths when possible. (boolean)\n* `GALAXY_IGNORE_CERTS`: If set to true, certificate validation will not be done when installing content from any Galaxy server. (boolean)\n* `STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display before requiring the output be downloaded. (integer)\n* `EVENT_STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display for a single job or ad hoc command event. `stdout` will end with `\u2026` when truncated. (integer)\n* `MAX_WEBSOCKET_EVENT_RATE`: Maximum number of messages to update the UI live job output with per second. Value of 0 means no limit. (integer)\n* `SCHEDULE_MAX_JOBS`: Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created. (integer)\n* `AWX_ANSIBLE_CALLBACK_PLUGINS`: List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line. (list)\n* `DEFAULT_JOB_TIMEOUT`: Maximum time in seconds to allow jobs to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual job template will override this. (integer)\n* `DEFAULT_JOB_IDLE_TIMEOUT`: If no output is detected from ansible in this number of seconds the execution will be terminated. Use value of 0 to indicate that no idle timeout should be imposed. (integer)\n* `DEFAULT_INVENTORY_UPDATE_TIMEOUT`: Maximum time in seconds to allow inventory updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual inventory source will override this. (integer)\n* `DEFAULT_PROJECT_UPDATE_TIMEOUT`: Maximum time in seconds to allow project updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual project will override this. (integer)\n* `ANSIBLE_FACT_CACHE_TIMEOUT`: Maximum time, in seconds, that stored Ansible facts are considered valid since the last time they were modified. Only valid, non-stale, facts will be accessible by a playbook. Note, this does not influence the deletion of ansible_facts from the database. Use a value of 0 to indicate that no timeout should be imposed. (integer)\n* `MAX_FORKS`: Saving a Job Template with more than this number of forks will result in an error. When set to 0, no limit is applied. (integer)\n* `LOG_AGGREGATOR_HOST`: Hostname/IP where external logs will be sent to. (string)\n* `LOG_AGGREGATOR_PORT`: Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator). (integer)\n* `LOG_AGGREGATOR_TYPE`: Format messages for the chosen log aggregator. (choice)\n - `None`: ---------\n - `logstash`\n - `splunk`\n - `loggly`\n - `sumologic`\n - `other`\n* `LOG_AGGREGATOR_USERNAME`: Username for external log aggregator (if required; HTTP/s only). (string)\n* `LOG_AGGREGATOR_PASSWORD`: Password or authentication token for external log aggregator (if required; HTTP/s only). 
(string)\n* `LOG_AGGREGATOR_LOGGERS`: List of loggers that will send HTTP logs to the collector, these can include any or all of: \nawx - service logs\nactivity_stream - activity stream records\njob_events - callback data from Ansible job events\nsystem_tracking - facts gathered from scan jobs\nbroadcast_websocket - errors pertaining to websockets broadcast metrics\n (list)\n* `LOG_AGGREGATOR_INDIVIDUAL_FACTS`: If set, system tracking facts will be sent for each package, service, or other item found in a scan, allowing for greater search query granularity. If unset, facts will be sent as a single dictionary, allowing for greater efficiency in fact processing. (boolean)\n* `LOG_AGGREGATOR_ENABLED`: Enable sending logs to external log aggregator. (boolean)\n* `LOG_AGGREGATOR_TOWER_UUID`: Useful to uniquely identify instances. (string)\n* `LOG_AGGREGATOR_PROTOCOL`: Protocol used to communicate with log aggregator. HTTPS/HTTP assumes HTTPS unless http:// is explicitly used in the Logging Aggregator hostname. (choice)\n - `https`: HTTPS/HTTP\n - `tcp`: TCP\n - `udp`: UDP\n* `LOG_AGGREGATOR_TCP_TIMEOUT`: Number of seconds for a TCP connection to external log aggregator to timeout. Applies to HTTPS and TCP log aggregator protocols. (integer)\n* `LOG_AGGREGATOR_VERIFY_CERT`: Flag to control enable/disable of certificate verification when LOG_AGGREGATOR_PROTOCOL is "https". If enabled, the log handler will verify certificate sent by external log aggregator before establishing connection. (boolean)\n* `LOG_AGGREGATOR_LEVEL`: Level threshold used by log handler. Severities from lowest to highest are DEBUG, INFO, WARNING, ERROR, CRITICAL. Messages less severe than the threshold will be ignored by log handler. (messages under category awx.analytics ignore this setting) (choice)\n - `DEBUG`\n - `INFO`\n - `WARNING`\n - `ERROR`\n - `CRITICAL`\n* `LOG_AGGREGATOR_ACTION_QUEUE_SIZE`: Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5). (integer)\n* `LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB`: Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH. (integer)\n* `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH`: Location to persist logs that should be retried after an outage of the external log aggregator (defaults to /var/lib/awx). Equivalent to the rsyslogd queue.spoolDirectory setting. (string)\n* `LOG_AGGREGATOR_RSYSLOGD_DEBUG`: Enables high verbosity debugging for rsyslogd. Useful for debugging connection issues for external log aggregation. (boolean)\n* `API_400_ERROR_LOG_FORMAT`: The format of logged messages when an API 4XX error occurs, the following variables will be substituted: \nstatus_code - The HTTP status code of the error\nuser_name - The user name attempting to use the API\nurl_path - The URL path to the API endpoint called\nremote_addr - The remote address seen for the user\nerror - The error set by the api endpoint\nVariables need to be in the format {<variable name>}. 
(string)\n* `AUTOMATION_ANALYTICS_LAST_GATHER`: (datetime)\n* `AUTOMATION_ANALYTICS_LAST_ENTRIES`: (string)\n* `AUTOMATION_ANALYTICS_GATHER_INTERVAL`: Interval (in seconds) between data gathering. (integer)\n* `IS_K8S`: Indicates whether the instance is part of a kubernetes-based deployment. (boolean)\n* `BULK_JOB_MAX_LAUNCH`: Max jobs to allow bulk jobs to launch (integer)\n* `BULK_HOST_MAX_CREATE`: Max number of hosts to allow to be created in a single bulk action (integer)\n* `UI_NEXT`: Enable preview of new user interface. (boolean)\n* `SUBSCRIPTION_USAGE_MODEL`: (choice)\n - `\"\"`: Default model for AWX - no subscription. Deletion of host_metrics will not be considered for purposes of managed host counting\n - `unique_managed_hosts`: Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes\n* `CLEANUP_HOST_METRICS_LAST_TS`: (datetime)\n* `HOST_METRIC_SUMMARY_TASK_LAST_TS`: (datetime)\n* `AWX_CLEANUP_PATHS`: Enable or Disable TMP Dir cleanup (boolean)\n* `AWX_REQUEST_PROFILE`: Debug web request python timing (boolean)\n* `DEFAULT_CONTAINER_RUN_OPTIONS`: List of options to pass to podman run example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug'] (list)\n* `RECEPTOR_RELEASE_WORK`: Release receptor work (boolean)\n* `SESSION_COOKIE_AGE`: Number of seconds that a user is inactive before they will need to login again. (integer)\n* `SESSIONS_PER_USER`: Maximum number of simultaneous logged in sessions a user may have. To disable enter -1. (integer)\n* `DISABLE_LOCAL_AUTH`: Controls whether users are prevented from using the built-in authentication system. You probably want to do this if you are using an LDAP or SAML integration. (boolean)\n* `AUTH_BASIC_ENABLED`: Enable HTTP Basic Auth for the API Browser. (boolean)\n* `OAUTH2_PROVIDER`: Dictionary for customizing OAuth 2 timeouts, available items are `ACCESS_TOKEN_EXPIRE_SECONDS`, the duration of access tokens in the number of seconds, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, the duration of authorization codes in the number of seconds, and `REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after expired access tokens, in the number of seconds. (nested object)\n* `ALLOW_OAUTH2_FOR_EXTERNAL_USERS`: For security reasons, users from external auth providers (LDAP, SAML, SSO, Radius, and others) are not allowed to create OAuth2 tokens. To change this behavior, enable this setting. Existing tokens will not be deleted when this setting is toggled off. (boolean)\n* `LOGIN_REDIRECT_OVERRIDE`: URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page. (string)\n* `ALLOW_METRICS_FOR_ANONYMOUS_USERS`: If true, anonymous users are allowed to poll metrics. (boolean)\n* `PENDO_TRACKING_STATE`: Enable or Disable User Analytics Tracking. (choice)\n - `off`: Off\n - `anonymous`: Anonymous\n - `detailed`: Detailed\n* `CUSTOM_LOGIN_INFO`: If needed, you can add specific information (such as a legal notice or a disclaimer) to a text box in the login modal using this setting. Any content added must be in plain text or an HTML fragment, as other markup languages are not supported. (string)\n* `CUSTOM_LOGO`: To set up a custom logo, provide a file that you create. For the custom logo to look its best, use a .png file with a transparent background. GIF, PNG and JPEG formats are supported. (string)\n* `MAX_UI_JOB_EVENTS`: Maximum number of job events for the UI to retrieve within a single request. 
(integer)\n* `UI_LIVE_UPDATES_ENABLED`: If disabled, the page will not refresh when events are received. Reloading the page will be required to get the latest details. (boolean)\n* `AUTHENTICATION_BACKENDS`: List of authentication backends that are enabled based on license features and other authentication settings. (list)\n* `SOCIAL_AUTH_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_USER_FIELDS`: When set to an empty list `[]`, this setting prevents new user accounts from being created. Only users who have previously logged in using social auth or have a user account with a matching email address will be able to login. (list)\n* `SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL`: Enabling this setting will tell social auth to use the full Email as username instead of the full name (boolean)\n* `AUTH_LDAP_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. 
(list)\n* `AUTH_LDAP_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_1_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_1_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_1_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_1_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_1_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_1_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_1_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. 
(string)\n* `AUTH_LDAP_1_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_1_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_1_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_1_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_1_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_1_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_1_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_1_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_1_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_2_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_2_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_2_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_2_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_2_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_2_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. 
The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_2_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_2_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_2_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_2_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_2_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_2_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_2_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_2_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_2_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_2_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_3_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_3_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_3_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_3_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. 
(boolean)\n* `AUTH_LDAP_3_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_3_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_3_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_3_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_3_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_3_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_3_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_3_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_3_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_3_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_3_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_3_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_4_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). 
Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_4_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_4_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_4_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_4_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_4_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_4_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_4_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_4_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_4_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_4_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_4_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_4_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_4_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. 
(nested object)\n* `AUTH_LDAP_4_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_4_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_5_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_5_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_5_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_5_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_5_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_5_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_5_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_5_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_5_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_5_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_5_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_5_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. 
If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_5_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_5_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_5_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_5_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `RADIUS_SERVER`: Hostname/IP of RADIUS server. RADIUS authentication is disabled if this setting is empty. (string)\n* `RADIUS_PORT`: Port of RADIUS server. (integer)\n* `RADIUS_SECRET`: Shared secret for authenticating to RADIUS server. (string)\n* `TACACSPLUS_HOST`: Hostname of TACACS+ server. (string)\n* `TACACSPLUS_PORT`: Port number of TACACS+ server. (integer)\n* `TACACSPLUS_SECRET`: Shared secret for authenticating to TACACS+ server. (string)\n* `TACACSPLUS_SESSION_TIMEOUT`: TACACS+ session timeout value in seconds, 0 disables timeout. (integer)\n* `TACACSPLUS_AUTH_PROTOCOL`: Choose the authentication protocol used by TACACS+ client. (choice)\n - `ascii`\n - `pap`\n* `TACACSPLUS_REM_ADDR`: Enable the client address sending by TACACS+ client. (boolean)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_KEY`: The OAuth2 key from your web application. (string)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET`: The OAuth2 secret from your web application. (string)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS`: Update this setting to restrict the domains who are allowed to login using Google OAuth2. (list)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS`: Extra arguments for Google OAuth2 login. You can restrict it to only allow a single domain to authenticate, even if the user is logged in with multiple Google accounts. Refer to the documentation for more detail. (nested object)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_KEY`: The OAuth2 key (Client ID) from your GitHub developer application. (string)\n* `SOCIAL_AUTH_GITHUB_SECRET`: The OAuth2 secret (Client Secret) from your GitHub developer application. (string)\n* `SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. 
This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ORG_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_NAME`: The name of your GitHub organization, as used in your organization's URL: https://github.com/<yourorg>/. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_TEAM_CALLBACK_URL`: Create an organization-owned application at https://github.com/organizations/<yourorg>/settings/applications and obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this URL as the callback URL for your application. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_ID`: Find the numeric team ID using the Github API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise developer application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise developer application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. 
This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME`: The name of your GitHub Enterprise organization, as used in your organization's URL: https://github.com/<yourorg>/. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_CALLBACK_URL`: Create an organization-owned application at https://github.com/organizations/<yourorg>/settings/applications and obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this URL as the callback URL for your application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID`: Find the numeric team ID using the Github Enterprise API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. 
(nested object)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_KEY`: The OAuth2 key (Client ID) from your Azure AD application. (string)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET`: The OAuth2 secret (Client Secret) from your Azure AD application. (string)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_OIDC_KEY`: The OIDC key (Client ID) from your IDP. (string)\n* `SOCIAL_AUTH_OIDC_SECRET`: The OIDC secret (Client Secret) from your IDP. (string)\n* `SOCIAL_AUTH_OIDC_OIDC_ENDPOINT`: The URL for your OIDC provider including the path up to /.well-known/openid-configuration (string)\n* `SOCIAL_AUTH_OIDC_VERIFY_SSL`: Verify the OIDC provider ssl certificate. (boolean)\n* `SAML_AUTO_CREATE_OBJECTS`: When enabled (the default), mapped Organizations and Teams will be created automatically on successful SAML login. (boolean)\n* `SOCIAL_AUTH_SAML_CALLBACK_URL`: Register the service as a service provider (SP) with each identity provider (IdP) you have configured. Provide your SP Entity ID and this ACS URL for your application. (string)\n* `SOCIAL_AUTH_SAML_METADATA_URL`: If your identity provider (IdP) allows uploading an XML metadata file, you can download one from this URL. (string)\n* `SOCIAL_AUTH_SAML_SP_ENTITY_ID`: The application-defined unique identifier used as the audience of the SAML service provider (SP) configuration. This is usually the URL for the service. (string)\n* `SOCIAL_AUTH_SAML_SP_PUBLIC_CERT`: Create a keypair to use as a service provider (SP) and include the certificate content here. (string)\n* `SOCIAL_AUTH_SAML_SP_PRIVATE_KEY`: Create a keypair to use as a service provider (SP) and include the private key content here. (string)\n* `SOCIAL_AUTH_SAML_ORG_INFO`: Provide the URL, display name, and the name of your app. Refer to the documentation for example syntax. (nested object)\n* `SOCIAL_AUTH_SAML_TECHNICAL_CONTACT`: Provide the name and email address of the technical contact for your service provider. Refer to the documentation for example syntax. (nested object)\n* `SOCIAL_AUTH_SAML_SUPPORT_CONTACT`: Provide the name and email address of the support contact for your service provider. Refer to the documentation for example syntax. (nested object)\n* `SOCIAL_AUTH_SAML_ENABLED_IDPS`: Configure the Entity ID, SSO URL and certificate for each identity provider (IdP) in use. Multiple SAML IdPs are supported. Some IdPs may provide user data using attribute names that differ from the default OIDs. Attribute names may be overridden for each IdP. Refer to the Ansible documentation for additional details and syntax. (nested object)\n* `SOCIAL_AUTH_SAML_SECURITY_CONFIG`: A dict of key value pairs that are passed to the underlying python-saml security setting https://github.com/onelogin/python-saml#settings (nested object)\n* `SOCIAL_AUTH_SAML_SP_EXTRA`: A dict of key value pairs to be passed to the underlying python-saml Service Provider configuration setting. 
(nested object)\n* `SOCIAL_AUTH_SAML_EXTRA_DATA`: A list of tuples that maps IDP attributes to extra_attributes. Each attribute will be a list of values, even if only 1 value. (list)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_SAML_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_ATTR`: Used to translate user organization membership. (nested object)\n* `SOCIAL_AUTH_SAML_TEAM_ATTR`: Used to translate user team membership. (nested object)\n* `SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR`: Used to map super users and system auditors from SAML. (nested object)\n* `LOCAL_PASSWORD_MIN_LENGTH`: Minimum number of characters required in a local password. 0 means no minimum. (integer)\n* `LOCAL_PASSWORD_MIN_DIGITS`: Minimum number of digit characters required in a local password. 0 means no minimum. (integer)\n* `LOCAL_PASSWORD_MIN_UPPER`: Minimum number of uppercase characters required in a local password. 0 means no minimum. (integer)\n* `LOCAL_PASSWORD_MIN_SPECIAL`: Minimum number of special characters required in a local password. 0 means no minimum. (integer)\n\n\n\n\n\n# Update a Setting:\n\nMake a PUT or PATCH request to this resource to update this\nsetting. The following fields may be modified:\n\n\n* `ACTIVITY_STREAM_ENABLED`: Enable capturing activity for the activity stream. (boolean, required)\n* `ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC`: Enable capturing activity for the activity stream when running inventory sync. (boolean, required)\n* `ORG_ADMINS_CAN_SEE_ALL_USERS`: Controls whether any Organization Admin can view all users and teams, even those not associated with their Organization. (boolean, required)\n* `MANAGE_ORGANIZATION_AUTH`: Controls whether any Organization Admin has the privileges to create and manage users and teams. You may want to disable this ability if you are using an LDAP or SAML integration. (boolean, required)\n* `TOWER_URL_BASE`: This setting is used by services like notifications to render a valid URL to the service. (string, required)\n* `REMOTE_HOST_HEADERS`: HTTP headers and meta keys to search to determine remote host name or IP. Add additional items to this list, such as "HTTP_X_FORWARDED_FOR", if behind a reverse proxy. See the "Proxy Support" section of the AAP Installation guide for more details. (list, required)\n* `PROXY_IP_ALLOWED_LIST`: If the service is behind a reverse proxy/load balancer, use this setting to configure the proxy IP addresses from which the service should trust custom REMOTE_HOST_HEADERS header values. If this setting is an empty list (the default), the headers specified by REMOTE_HOST_HEADERS will be trusted unconditionally. (list, required)\n* `CSRF_TRUSTED_ORIGINS`: If the service is behind a reverse proxy/load balancer, use this setting to configure the scheme://addresses from which the service should trust Origin header values. A sample payload for these reverse-proxy settings is sketched below. (list, default=`[]`)
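\n\nAs an example, here is a minimal Python sketch of a PATCH payload covering the three reverse-proxy settings above (the proxy address and origin are illustrative assumptions, not values from this spec):\n\n    # Sketch only: substitute the address of your own reverse proxy and the\n    # scheme://address that users reach the service on.\n    proxy_settings = {\n        "REMOTE_HOST_HEADERS": ["HTTP_X_FORWARDED_FOR", "REMOTE_ADDR", "REMOTE_HOST"],\n        "PROXY_IP_ALLOWED_LIST": ["10.0.0.10"],\n        "CSRF_TRUSTED_ORIGINS": ["https://controller.example.com"],\n    }\n\nSending this dict as the body of the PATCH request described above updates only these three fields.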
\n\n* `REDHAT_USERNAME`: This username is used to send data to Automation Analytics. (string, default=`\"\"`)\n* `REDHAT_PASSWORD`: This password is used to send data to Automation Analytics. (string, default=`\"\"`)\n* `SUBSCRIPTIONS_USERNAME`: This username is used to retrieve subscription and content information. (string, default=`\"\"`)\n* `SUBSCRIPTIONS_PASSWORD`: This password is used to retrieve subscription and content information. (string, default=`\"\"`)\n* `AUTOMATION_ANALYTICS_URL`: This setting is used to configure the upload URL for data collection for Automation Analytics. (string, default=`\"https://example.com\"`)\n\n\n\n* `DEFAULT_EXECUTION_ENVIRONMENT`: The Execution Environment to be used when one has not been configured for a job template. (field, default=`None`)\n* `CUSTOM_VENV_PATHS`: Paths where Tower will look for custom virtual environments (in addition to /var/lib/awx/venv/). Enter one path per line. (list, default=`[]`)\n* `AD_HOC_COMMANDS`: List of modules allowed to be used by ad-hoc jobs. (list, default=`['command', 'shell', 'yum', 'apt', 'apt_key', 'apt_repository', 'apt_rpm', 'service', 'group', 'user', 'mount', 'ping', 'selinux', 'setup', 'win_ping', 'win_service', 'win_updates', 'win_group', 'win_user']`)\n* `ALLOW_JINJA_IN_EXTRA_VARS`: Ansible allows variable substitution via the Jinja2 templating language for --extra-vars. This poses a potential security risk where users with the ability to specify extra vars at job launch time can use Jinja2 templates to run arbitrary Python. It is recommended that this value be set to "template" or "never". (choice, required)\n - `always`: Always\n - `never`: Never\n - `template`: Only On Job Template Definitions (default)\n* `AWX_ISOLATION_BASE_PATH`: The directory in which the service will create new temporary directories for job execution and isolation (such as credential files). (string, required)\n* `AWX_ISOLATION_SHOW_PATHS`: List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. Volumes will be mounted from the execution node to the container. The supported format is HOST-DIR[:CONTAINER-DIR[:OPTIONS]]. (list, default=`[]`)\n* `AWX_TASK_ENV`: Additional environment variables set for playbook runs, inventory updates, project updates, and notification sending. (nested object, default=`{}`)\n* `AWX_RUNNER_KEEPALIVE_SECONDS`: Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open. (integer, required)\n* `GALAXY_TASK_ENV`: Additional environment variables set for invocations of ansible-galaxy within project updates. Useful if you must use a proxy server for ansible-galaxy but not git. (nested object, required)\n* `INSIGHTS_TRACKING_STATE`: Enables the service to gather data on automation and send it to Automation Analytics. (boolean, default=`False`)\n* `PROJECT_UPDATE_VVV`: Adds the CLI -vvv flag to ansible-playbook runs of project_update.yml used for project updates. (boolean, required)\n* `AWX_ROLES_ENABLED`: Allows roles to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean, default=`True`)\n* `AWX_COLLECTIONS_ENABLED`: Allows collections to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean, default=`True`)\n* `AWX_SHOW_PLAYBOOK_LINKS`: Follow symbolic links when scanning for playbooks. Be aware that setting this to True can lead to infinite recursion if a link points to a parent directory of itself.
(boolean, default=`False`)\n* `AWX_MOUNT_ISOLATED_PATHS_ON_K8S`: Expose paths via hostPath for the Pods created by a Container Group. HostPath volumes present many security risks, and it is a best practice to avoid the use of HostPaths when possible. (boolean, default=`False`)\n* `GALAXY_IGNORE_CERTS`: If set to true, certificate validation will not be done when installing content from any Galaxy server. (boolean, default=`False`)\n* `STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display before requiring the output be downloaded. (integer, required)\n* `EVENT_STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display for a single job or ad hoc command event. `stdout` will end with `\u2026` when truncated. (integer, required)\n* `MAX_WEBSOCKET_EVENT_RATE`: Maximum number of messages to update the UI live job output with per second. Value of 0 means no limit. (integer, default=`30`)\n* `SCHEDULE_MAX_JOBS`: Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created. (integer, required)\n* `AWX_ANSIBLE_CALLBACK_PLUGINS`: List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line. (list, default=`[]`)\n* `DEFAULT_JOB_TIMEOUT`: Maximum time in seconds to allow jobs to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual job template will override this. (integer, default=`0`)\n* `DEFAULT_JOB_IDLE_TIMEOUT`: If no output is detected from ansible in this number of seconds the execution will be terminated. Use value of 0 to indicate that no idle timeout should be imposed. (integer, default=`0`)\n* `DEFAULT_INVENTORY_UPDATE_TIMEOUT`: Maximum time in seconds to allow inventory updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual inventory source will override this. (integer, default=`0`)\n* `DEFAULT_PROJECT_UPDATE_TIMEOUT`: Maximum time in seconds to allow project updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual project will override this. (integer, default=`0`)\n* `ANSIBLE_FACT_CACHE_TIMEOUT`: Maximum time, in seconds, that stored Ansible facts are considered valid since the last time they were modified. Only valid, non-stale, facts will be accessible by a playbook. Note, this does not influence the deletion of ansible_facts from the database. Use a value of 0 to indicate that no timeout should be imposed. (integer, default=`0`)\n* `MAX_FORKS`: Saving a Job Template with more than this number of forks will result in an error. When set to 0, no limit is applied. (integer, default=`200`)\n* `LOG_AGGREGATOR_HOST`: Hostname/IP where external logs will be sent to. (string, default=`\"\"`)\n* `LOG_AGGREGATOR_PORT`: Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator). (integer, default=`None`)\n* `LOG_AGGREGATOR_TYPE`: Format messages for the chosen log aggregator. (choice)\n - `None`: --------- (default)\n - `logstash`\n - `splunk`\n - `loggly`\n - `sumologic`\n - `other`\n* `LOG_AGGREGATOR_USERNAME`: Username for external log aggregator (if required; HTTP/s only). (string, default=`\"\"`)\n* `LOG_AGGREGATOR_PASSWORD`: Password or authentication token for external log aggregator (if required; HTTP/s only). 
(string, default=`\"\"`)\n* `LOG_AGGREGATOR_LOGGERS`: List of loggers that will send HTTP logs to the collector; these can include any or all of: \nawx - service logs\nactivity_stream - activity stream records\njob_events - callback data from Ansible job events\nsystem_tracking - facts gathered from scan jobs\nbroadcast_websocket - errors pertaining to websockets broadcast metrics\n (list, default=`['awx', 'activity_stream', 'job_events', 'system_tracking', 'broadcast_websocket']`)\n* `LOG_AGGREGATOR_INDIVIDUAL_FACTS`: If set, system tracking facts will be sent for each package, service, or other item found in a scan, allowing for greater search query granularity. If unset, facts will be sent as a single dictionary, allowing for greater efficiency in fact processing. (boolean, default=`False`)\n* `LOG_AGGREGATOR_ENABLED`: Enable sending logs to external log aggregator. (boolean, default=`False`)\n* `LOG_AGGREGATOR_TOWER_UUID`: Useful to uniquely identify instances. (string, default=`\"\"`)\n* `LOG_AGGREGATOR_PROTOCOL`: Protocol used to communicate with log aggregator. HTTPS/HTTP assumes HTTPS unless http:// is explicitly used in the Logging Aggregator hostname. (choice)\n - `https`: HTTPS/HTTP (default)\n - `tcp`: TCP\n - `udp`: UDP\n* `LOG_AGGREGATOR_TCP_TIMEOUT`: Number of seconds for a TCP connection to the external log aggregator to time out. Applies to HTTPS and TCP log aggregator protocols. (integer, default=`5`)\n* `LOG_AGGREGATOR_VERIFY_CERT`: Flag to control enable/disable of certificate verification when LOG_AGGREGATOR_PROTOCOL is "https". If enabled, the log handler will verify the certificate sent by the external log aggregator before establishing connection. (boolean, default=`True`)\n* `LOG_AGGREGATOR_LEVEL`: Level threshold used by log handler. Severities from lowest to highest are DEBUG, INFO, WARNING, ERROR, CRITICAL. Messages less severe than the threshold will be ignored by log handler. (messages under category awx.analytics ignore this setting) (choice)\n - `DEBUG`\n - `INFO` (default)\n - `WARNING`\n - `ERROR`\n - `CRITICAL`\n* `LOG_AGGREGATOR_ACTION_QUEUE_SIZE`: Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5). (integer, default=`131072`)\n* `LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB`: Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH. (integer, default=`1`)\n* `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH`: Location to persist logs that should be retried after an outage of the external log aggregator (defaults to /var/lib/awx). Equivalent to the rsyslogd queue.spoolDirectory setting. (string, default=`\"/var/lib/awx\"`)\n* `LOG_AGGREGATOR_RSYSLOGD_DEBUG`: Enables high verbosity debugging for rsyslogd. Useful for debugging connection issues for external log aggregation. An example of a minimal external-logging configuration is sketched below. (boolean, default=`False`)
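\n\nAs an example, a minimal Python sketch of the settings involved in shipping logs to an external aggregator (the hostname and port are illustrative assumptions, not values from this spec):\n\n    # Sketch only: enable HTTPS log shipping to an assumed Splunk endpoint.\n    logging_settings = {\n        "LOG_AGGREGATOR_ENABLED": True,\n        "LOG_AGGREGATOR_TYPE": "splunk",\n        "LOG_AGGREGATOR_HOST": "https://splunk.example.com",\n        "LOG_AGGREGATOR_PORT": 8088,\n        "LOG_AGGREGATOR_PROTOCOL": "https",\n        "LOG_AGGREGATOR_LEVEL": "INFO",\n    }\n\nAs with the other nested settings, this dict can be sent as a PATCH body.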
\n* `API_400_ERROR_LOG_FORMAT`: The format of logged messages when an API 4XX error occurs; the following variables will be substituted: \nstatus_code - The HTTP status code of the error\nuser_name - The user name attempting to use the API\nurl_path - The URL path to the API endpoint called\nremote_addr - The remote address seen for the user\nerror - The error set by the API endpoint\nVariables need to be in the format {<variable name>}. (string, default=`\"status {status_code} received by user {user_name} attempting to access {url_path} from {remote_addr}\"`)\n* `AUTOMATION_ANALYTICS_LAST_GATHER`: (datetime, required)\n* `AUTOMATION_ANALYTICS_LAST_ENTRIES`: (string, default=`\"\"`)\n* `AUTOMATION_ANALYTICS_GATHER_INTERVAL`: Interval (in seconds) between data gathering. (integer, default=`14400`)\n\n* `BULK_JOB_MAX_LAUNCH`: Maximum number of jobs that a single bulk job is allowed to launch. (integer, default=`100`)\n* `BULK_HOST_MAX_CREATE`: Maximum number of hosts that can be created in a single bulk action. (integer, default=`100`)\n* `UI_NEXT`: Enable preview of new user interface. (boolean, default=`True`)\n* `SUBSCRIPTION_USAGE_MODEL`: (choice)\n - `\"\"`: Default model for AWX - no subscription. Deletion of host_metrics will not be considered for purposes of managed host counting (default)\n - `unique_managed_hosts`: Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes\n* `CLEANUP_HOST_METRICS_LAST_TS`: (datetime, required)\n* `HOST_METRIC_SUMMARY_TASK_LAST_TS`: (datetime, required)\n* `AWX_CLEANUP_PATHS`: Enable or disable cleanup of temporary directories. (boolean, default=`True`)\n* `AWX_REQUEST_PROFILE`: Profile Python timing of web requests. (boolean, default=`False`)\n* `DEFAULT_CONTAINER_RUN_OPTIONS`: List of options to pass to podman run, for example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug'] (list, default=`['--network', 'slirp4netns:enable_ipv6=true']`)\n* `RECEPTOR_RELEASE_WORK`: Release receptor work (boolean, default=`True`)\n* `SESSION_COOKIE_AGE`: Number of seconds that a user is inactive before they will need to log in again. (integer, required)\n* `SESSIONS_PER_USER`: Maximum number of simultaneous logged-in sessions a user may have. To disable, enter -1. (integer, required)\n* `DISABLE_LOCAL_AUTH`: Controls whether users are prevented from using the built-in authentication system. You probably want to do this if you are using an LDAP or SAML integration. (boolean, required)\n* `AUTH_BASIC_ENABLED`: Enable HTTP Basic Auth for the API Browser. (boolean, required)\n* `OAUTH2_PROVIDER`: Dictionary for customizing OAuth 2 timeouts; available items are `ACCESS_TOKEN_EXPIRE_SECONDS`, the duration of access tokens in the number of seconds, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, the duration of authorization codes in the number of seconds, and `REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after expired access tokens, in the number of seconds. (nested object, default=`{'ACCESS_TOKEN_EXPIRE_SECONDS': 31536000000, 'AUTHORIZATION_CODE_EXPIRE_SECONDS': 600, 'REFRESH_TOKEN_EXPIRE_SECONDS': 2628000}`)\n* `ALLOW_OAUTH2_FOR_EXTERNAL_USERS`: For security reasons, users from external auth providers (LDAP, SAML, SSO, Radius, and others) are not allowed to create OAuth2 tokens. To change this behavior, enable this setting. Existing tokens will not be deleted when this setting is toggled off. (boolean, default=`False`)\n* `LOGIN_REDIRECT_OVERRIDE`: URL to which unauthorized users will be redirected to log in.
If blank, users will be sent to the login page. (string, default=`\"\"`)\n* `ALLOW_METRICS_FOR_ANONYMOUS_USERS`: If true, anonymous users are allowed to poll metrics. (boolean, default=`False`)\n\n* `CUSTOM_LOGIN_INFO`: If needed, you can add specific information (such as a legal notice or a disclaimer) to a text box in the login modal using this setting. Any content added must be in plain text or an HTML fragment, as other markup languages are not supported. (string, default=`\"\"`)\n* `CUSTOM_LOGO`: To set up a custom logo, provide a file that you create. For the custom logo to look its best, use a .png file with a transparent background. GIF, PNG and JPEG formats are supported. (string, default=`\"\"`)\n* `MAX_UI_JOB_EVENTS`: Maximum number of job events for the UI to retrieve within a single request. (integer, required)\n* `UI_LIVE_UPDATES_ENABLED`: If disabled, the page will not refresh when events are received. Reloading the page will be required to get the latest details. (boolean, required)\n\n* `SOCIAL_AUTH_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_USER_FIELDS`: When set to an empty list `[]`, this setting prevents new user accounts from being created. Only users who have previously logged in using social auth or have a user account with a matching email address will be able to login. (list, default=`None`)\n* `SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL`: Enabling this setting will tell social auth to use the full Email as username instead of the full name (boolean, default=`False`)\n* `AUTH_LDAP_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. 
(list, default=`[]`)\n* `AUTH_LDAP_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_1_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_1_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_1_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_1_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. 
(boolean, default=`False`)\n* `AUTH_LDAP_1_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_1_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_1_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_1_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_1_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_1_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_1_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_1_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_1_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_1_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_1_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_1_TEAM_MAP`: Mapping between team members (users) and LDAP groups. 
Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_2_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_2_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_2_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_2_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_2_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_2_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_2_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_2_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_2_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_2_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_2_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_2_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. 
Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_2_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_2_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_2_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_2_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_3_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_3_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_3_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_3_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_3_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_3_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_3_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_3_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_3_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_3_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_3_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_3_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_3_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_3_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_3_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_3_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_4_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_4_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_4_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_4_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_4_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_4_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_4_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. 
This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_4_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_4_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_4_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_4_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_4_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_4_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_4_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_4_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_4_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_5_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_5_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_5_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_5_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. 
(boolean, default=`False`)\n* `AUTH_LDAP_5_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_5_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_5_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_5_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_5_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_5_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_5_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_5_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_5_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_5_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_5_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_5_TEAM_MAP`: Mapping between team members (users) and LDAP groups. 
Configuration details are available in the documentation. (nested object, default=`{}`)\n* `RADIUS_SERVER`: Hostname/IP of RADIUS server. RADIUS authentication is disabled if this setting is empty. (string, default=`\"\"`)\n* `RADIUS_PORT`: Port of RADIUS server. (integer, default=`1812`)\n* `RADIUS_SECRET`: Shared secret for authenticating to RADIUS server. (string, default=`\"\"`)\n* `TACACSPLUS_HOST`: Hostname of TACACS+ server. (string, default=`\"\"`)\n* `TACACSPLUS_PORT`: Port number of TACACS+ server. (integer, default=`49`)\n* `TACACSPLUS_SECRET`: Shared secret for authenticating to TACACS+ server. (string, default=`\"\"`)\n* `TACACSPLUS_SESSION_TIMEOUT`: TACACS+ session timeout value in seconds; 0 disables the timeout. (integer, default=`5`)\n* `TACACSPLUS_AUTH_PROTOCOL`: Choose the authentication protocol used by TACACS+ client. (choice)\n - `ascii` (default)\n - `pap`\n* `TACACSPLUS_REM_ADDR`: Enable sending of the client address by the TACACS+ client. (boolean, default=`False`)\n\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_KEY`: The OAuth2 key from your web application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET`: The OAuth2 secret from your web application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS`: Update this setting to restrict the domains that are allowed to log in using Google OAuth2. (list, default=`[]`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS`: Extra arguments for Google OAuth2 login. You can restrict it to only allow a single domain to authenticate, even if the user is logged in with multiple Google accounts. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_KEY`: The OAuth2 key (Client ID) from your GitHub developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_SECRET`: The OAuth2 secret (Client Secret) from your GitHub developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORG_NAME`: The name of your GitHub organization, as used in your organization's URL: https://github.com/<yourorg>/. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts.
This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_TEAM_ID`: Find the numeric team ID using the Github API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME`: The name of your GitHub Enterprise organization, as used in your organization's URL: https://github.com/<yourorg>/. 
(string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID`: Find the numeric team ID using the Github Enterprise API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_KEY`: The OAuth2 key (Client ID) from your Azure AD application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET`: The OAuth2 secret (Client Secret) from your Azure AD application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_OIDC_KEY`: The OIDC key (Client ID) from your IDP. (string, default=`\"\"`)\n* `SOCIAL_AUTH_OIDC_SECRET`: The OIDC secret (Client Secret) from your IDP. (string, default=`\"\"`)\n* `SOCIAL_AUTH_OIDC_OIDC_ENDPOINT`: The URL for your OIDC provider including the path up to /.well-known/openid-configuration (string, default=`\"\"`)\n* `SOCIAL_AUTH_OIDC_VERIFY_SSL`: Verify the OIDC provider ssl certificate. (boolean, default=`True`)\n* `SAML_AUTO_CREATE_OBJECTS`: When enabled (the default), mapped Organizations and Teams will be created automatically on successful SAML login. (boolean, default=`True`)\n\n\n* `SOCIAL_AUTH_SAML_SP_ENTITY_ID`: The application-defined unique identifier used as the audience of the SAML service provider (SP) configuration. 
This is usually the URL for the service. (string, default=`\"\"`)\n* `SOCIAL_AUTH_SAML_SP_PUBLIC_CERT`: Create a keypair to use as a service provider (SP) and include the certificate content here. (string, required)\n* `SOCIAL_AUTH_SAML_SP_PRIVATE_KEY`: Create a keypair to use as a service provider (SP) and include the private key content here. (string, required)\n* `SOCIAL_AUTH_SAML_ORG_INFO`: Provide the URL, display name, and the name of your app. Refer to the documentation for example syntax. (nested object, required)\n* `SOCIAL_AUTH_SAML_TECHNICAL_CONTACT`: Provide the name and email address of the technical contact for your service provider. Refer to the documentation for example syntax. (nested object, required)\n* `SOCIAL_AUTH_SAML_SUPPORT_CONTACT`: Provide the name and email address of the support contact for your service provider. Refer to the documentation for example syntax. (nested object, required)\n* `SOCIAL_AUTH_SAML_ENABLED_IDPS`: Configure the Entity ID, SSO URL and certificate for each identity provider (IdP) in use. Multiple SAML IdPs are supported. Some IdPs may provide user data using attribute names that differ from the default OIDs. Attribute names may be overridden for each IdP. Refer to the Ansible documentation for additional details and syntax. (nested object, default=`{}`)\n* `SOCIAL_AUTH_SAML_SECURITY_CONFIG`: A dict of key value pairs that are passed to the underlying python-saml security setting https://github.com/onelogin/python-saml#settings (nested object, default=`{'requestedAuthnContext': False}`)\n* `SOCIAL_AUTH_SAML_SP_EXTRA`: A dict of key value pairs to be passed to the underlying python-saml Service Provider configuration setting. (nested object, default=`None`)\n* `SOCIAL_AUTH_SAML_EXTRA_DATA`: A list of tuples that maps IDP attributes to extra_attributes. Each attribute will be a list of values, even if only 1 value. (list, default=`None`)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_SAML_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_ATTR`: Used to translate user organization membership. (nested object, default=`{}`)\n* `SOCIAL_AUTH_SAML_TEAM_ATTR`: Used to translate user team membership. (nested object, default=`{}`)\n* `SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR`: Used to map super users and system auditors from SAML. (nested object, default=`{}`)\n* `LOCAL_PASSWORD_MIN_LENGTH`: Minimum number of characters required in a local password. 0 means no minimum. (integer, default=`0`)\n* `LOCAL_PASSWORD_MIN_DIGITS`: Minimum number of digit characters required in a local password. 0 means no minimum. (integer, default=`0`)\n* `LOCAL_PASSWORD_MIN_UPPER`: Minimum number of uppercase characters required in a local password. 0 means no minimum. (integer, default=`0`)\n* `LOCAL_PASSWORD_MIN_SPECIAL`: Minimum number of special characters required in a local password. 0 means no minimum. (integer, default=`0`)\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.
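\n\nFor example, a minimal Python sketch of a PATCH that changes a single field (the controller URL, token, and the /api/v2/settings/all/ path are illustrative assumptions, not guaranteed by this spec):\n\n    import requests\n\n    BASE = "https://controller.example.com"  # assumed hostname\n    headers = {"Authorization": "Bearer <oauth2-token>"}  # assumed token\n\n    # PATCH sends only the fields being modified; a PUT would have to\n    # carry every field listed above.\n    resp = requests.patch(f"{BASE}/api/v2/settings/all/",\n                          headers=headers, json={"SCHEDULE_MAX_JOBS": 25})\n    resp.raise_for_status()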
\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Setting:\n\nMake a DELETE request to this resource to delete this setting.",
                "operationId": "api_settings_delete",
                "parameters": [],
                "responses": {
                    "204": {
                        "description": ""
                    }
                },
                "summary": "Make DELETE request to this resource to delete this setting",
                "tags": [
                    "api"
                ]
            },
            "get": {
                "description": "record containing the following fields:\n\n* `ACTIVITY_STREAM_ENABLED`: Enable capturing activity for the activity stream. (boolean)\n* `ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC`: Enable capturing activity for the activity stream when running inventory sync. (boolean)\n* `ORG_ADMINS_CAN_SEE_ALL_USERS`: Controls whether any Organization Admin can view all users and teams, even those not associated with their Organization. (boolean)\n* `MANAGE_ORGANIZATION_AUTH`: Controls whether any Organization Admin has the privileges to create and manage users and teams. You may want to disable this ability if you are using an LDAP or SAML integration. (boolean)\n* `TOWER_URL_BASE`: This setting is used by services like notifications to render a valid url to the service. (string)\n* `REMOTE_HOST_HEADERS`: HTTP headers and meta keys to search to determine remote host name or IP. Add additional items to this list, such as "HTTP_X_FORWARDED_FOR", if behind a reverse proxy. See the "Proxy Support" section of the AAP Installation guide for more details. (list)\n* `PROXY_IP_ALLOWED_LIST`: If the service is behind a reverse proxy/load balancer, use this setting to configure the proxy IP addresses from which the service should trust custom REMOTE_HOST_HEADERS header values. If this setting is an empty list (the default), the headers specified by REMOTE_HOST_HEADERS will be trusted unconditionally. (list)\n* `CSRF_TRUSTED_ORIGINS`: If the service is behind a reverse proxy/load balancer, use this setting to configure the schema://addresses from which the service should trust Origin header values. (list)\n* `LICENSE`: The license controls which features and functionality are enabled. Use /api/v2/config/ to update or change the license. (nested object)\n* `REDHAT_USERNAME`: This username is used to send data to Automation Analytics (string)\n* `REDHAT_PASSWORD`: This password is used to send data to Automation Analytics (string)\n* `SUBSCRIPTIONS_USERNAME`: This username is used to retrieve subscription and content information (string)\n* `SUBSCRIPTIONS_PASSWORD`: This password is used to retrieve subscription and content information (string)\n* `AUTOMATION_ANALYTICS_URL`: This setting is used to configure the upload URL for data collection for Automation Analytics. (string)\n* `INSTALL_UUID`: (string)\n* `DEFAULT_CONTROL_PLANE_QUEUE_NAME`: (string)\n* `DEFAULT_EXECUTION_QUEUE_NAME`: (string)\n* `DEFAULT_EXECUTION_ENVIRONMENT`: The Execution Environment to be used when one has not been configured for a job template. (field)\n* `CUSTOM_VENV_PATHS`: Paths where Tower will look for custom virtual environments (in addition to /var/lib/awx/venv/). Enter one path per line. (list)\n* `AD_HOC_COMMANDS`: List of modules allowed to be used by ad-hoc jobs. (list)\n* `ALLOW_JINJA_IN_EXTRA_VARS`: Ansible allows variable substitution via the Jinja2 templating language for --extra-vars.
This poses a potential security risk where users with the ability to specify extra vars at job launch time can use Jinja2 templates to run arbitrary Python. It is recommended that this value be set to "template" or "never". (choice)\n - `always`: Always\n - `never`: Never\n - `template`: Only On Job Template Definitions\n* `AWX_ISOLATION_BASE_PATH`: The directory in which the service will create new temporary directories for job execution and isolation (such as credential files). (string)\n* `AWX_ISOLATION_SHOW_PATHS`: List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. Volumes will be mounted from the execution node to the container. The supported format is HOST-DIR[:CONTAINER-DIR[:OPTIONS]]. (list)\n* `AWX_TASK_ENV`: Additional environment variables set for playbook runs, inventory updates, project updates, and notification sending. (nested object)\n* `AWX_RUNNER_KEEPALIVE_SECONDS`: Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open. (integer)\n* `GALAXY_TASK_ENV`: Additional environment variables set for invocations of ansible-galaxy within project updates. Useful if you must use a proxy server for ansible-galaxy but not git. (nested object)\n* `INSIGHTS_TRACKING_STATE`: Enables the service to gather data on automation and send it to Automation Analytics. (boolean)\n* `PROJECT_UPDATE_VVV`: Adds the CLI -vvv flag to ansible-playbook runs of project_update.yml used for project updates. (boolean)\n* `AWX_ROLES_ENABLED`: Allows roles to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean)\n* `AWX_COLLECTIONS_ENABLED`: Allows collections to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean)\n* `AWX_SHOW_PLAYBOOK_LINKS`: Follow symbolic links when scanning for playbooks. Be aware that setting this to True can lead to infinite recursion if a link points to a parent directory of itself. (boolean)\n* `AWX_MOUNT_ISOLATED_PATHS_ON_K8S`: Expose paths via hostPath for the Pods created by a Container Group. HostPath volumes present many security risks, and it is a best practice to avoid the use of HostPaths when possible. (boolean)\n* `GALAXY_IGNORE_CERTS`: If set to true, certificate validation will not be done when installing content from any Galaxy server. (boolean)\n* `STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display before requiring the output be downloaded. (integer)\n* `EVENT_STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display for a single job or ad hoc command event. `stdout` will end with `\u2026` when truncated. (integer)\n* `MAX_WEBSOCKET_EVENT_RATE`: Maximum number of messages to update the UI live job output with per second. Value of 0 means no limit. (integer)\n* `SCHEDULE_MAX_JOBS`: Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created. (integer)\n* `AWX_ANSIBLE_CALLBACK_PLUGINS`: List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line. (list)\n* `DEFAULT_JOB_TIMEOUT`: Maximum time in seconds to allow jobs to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual job template will override this. (integer)\n* `DEFAULT_JOB_IDLE_TIMEOUT`: If no output is detected from ansible in this number of seconds the execution will be terminated. 
Use value of 0 to indicate that no idle timeout should be imposed. (integer)\n* `DEFAULT_INVENTORY_UPDATE_TIMEOUT`: Maximum time in seconds to allow inventory updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual inventory source will override this. (integer)\n* `DEFAULT_PROJECT_UPDATE_TIMEOUT`: Maximum time in seconds to allow project updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual project will override this. (integer)\n* `ANSIBLE_FACT_CACHE_TIMEOUT`: Maximum time, in seconds, that stored Ansible facts are considered valid since the last time they were modified. Only valid, non-stale, facts will be accessible by a playbook. Note, this does not influence the deletion of ansible_facts from the database. Use a value of 0 to indicate that no timeout should be imposed. (integer)\n* `MAX_FORKS`: Saving a Job Template with more than this number of forks will result in an error. When set to 0, no limit is applied. (integer)\n* `LOG_AGGREGATOR_HOST`: Hostname/IP where external logs will be sent to. (string)\n* `LOG_AGGREGATOR_PORT`: Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator). (integer)\n* `LOG_AGGREGATOR_TYPE`: Format messages for the chosen log aggregator. (choice)\n - `None`: ---------\n - `logstash`\n - `splunk`\n - `loggly`\n - `sumologic`\n - `other`\n* `LOG_AGGREGATOR_USERNAME`: Username for external log aggregator (if required; HTTP/s only). (string)\n* `LOG_AGGREGATOR_PASSWORD`: Password or authentication token for external log aggregator (if required; HTTP/s only). (string)\n* `LOG_AGGREGATOR_LOGGERS`: List of loggers that will send HTTP logs to the collector, these can include any or all of: \nawx - service logs\nactivity_stream - activity stream records\njob_events - callback data from Ansible job events\nsystem_tracking - facts gathered from scan jobs\nbroadcast_websocket - errors pertaining to websockets broadcast metrics\n (list)\n* `LOG_AGGREGATOR_INDIVIDUAL_FACTS`: If set, system tracking facts will be sent for each package, service, or other item found in a scan, allowing for greater search query granularity. If unset, facts will be sent as a single dictionary, allowing for greater efficiency in fact processing. (boolean)\n* `LOG_AGGREGATOR_ENABLED`: Enable sending logs to external log aggregator. (boolean)\n* `LOG_AGGREGATOR_TOWER_UUID`: Useful to uniquely identify instances. (string)\n* `LOG_AGGREGATOR_PROTOCOL`: Protocol used to communicate with log aggregator. HTTPS/HTTP assumes HTTPS unless http:// is explicitly used in the Logging Aggregator hostname. (choice)\n - `https`: HTTPS/HTTP\n - `tcp`: TCP\n - `udp`: UDP\n* `LOG_AGGREGATOR_TCP_TIMEOUT`: Number of seconds for a TCP connection to external log aggregator to timeout. Applies to HTTPS and TCP log aggregator protocols. (integer)\n* `LOG_AGGREGATOR_VERIFY_CERT`: Flag to control enable/disable of certificate verification when LOG_AGGREGATOR_PROTOCOL is "https". If enabled, the log handler will verify certificate sent by external log aggregator before establishing connection. (boolean)\n* `LOG_AGGREGATOR_LEVEL`: Level threshold used by log handler. Severities from lowest to highest are DEBUG, INFO, WARNING, ERROR, CRITICAL. Messages less severe than the threshold will be ignored by log handler. (messages under category awx.analytics ignore this setting) (choice)\n - `DEBUG`\n - `INFO`\n - `WARNING`\n - `ERROR`\n - `CRITICAL`
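Taken together, the logging settings above might be combined along these lines (the hostname, port, and chosen values are placeholder assumptions for an external Logstash collector):

```json
{
    "LOG_AGGREGATOR_ENABLED": true,
    "LOG_AGGREGATOR_HOST": "logstash.example.com",
    "LOG_AGGREGATOR_PORT": 8080,
    "LOG_AGGREGATOR_TYPE": "logstash",
    "LOG_AGGREGATOR_PROTOCOL": "tcp",
    "LOG_AGGREGATOR_TCP_TIMEOUT": 5,
    "LOG_AGGREGATOR_LOGGERS": ["awx", "activity_stream", "job_events", "system_tracking"],
    "LOG_AGGREGATOR_LEVEL": "INFO"
}
```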
\n* `LOG_AGGREGATOR_ACTION_QUEUE_SIZE`: Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5). (integer)\n* `LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB`: Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH. (integer)\n* `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH`: Location to persist logs that should be retried after an outage of the external log aggregator (defaults to /var/lib/awx). Equivalent to the rsyslogd queue.spoolDirectory setting. (string)\n* `LOG_AGGREGATOR_RSYSLOGD_DEBUG`: Enables high verbosity debugging for rsyslogd. Useful for debugging connection issues for external log aggregation. (boolean)\n* `API_400_ERROR_LOG_FORMAT`: The format of logged messages when an API 4XX error occurs, the following variables will be substituted: \nstatus_code - The HTTP status code of the error\nuser_name - The user name attempting to use the API\nurl_path - The URL path to the API endpoint called\nremote_addr - The remote address seen for the user\nerror - The error set by the api endpoint\nVariables need to be in the format {<variable name>}. (string)\n* `AUTOMATION_ANALYTICS_LAST_GATHER`: (datetime)\n* `AUTOMATION_ANALYTICS_LAST_ENTRIES`: (string)\n* `AUTOMATION_ANALYTICS_GATHER_INTERVAL`: Interval (in seconds) between data gathering. (integer)\n* `IS_K8S`: Indicates whether the instance is part of a kubernetes-based deployment. (boolean)\n* `BULK_JOB_MAX_LAUNCH`: Max jobs to allow bulk jobs to launch (integer)\n* `BULK_HOST_MAX_CREATE`: Max number of hosts to allow to be created in a single bulk action (integer)\n* `UI_NEXT`: Enable preview of new user interface. (boolean)\n* `SUBSCRIPTION_USAGE_MODEL`: (choice)\n - `\"\"`: Default model for AWX - no subscription. Deletion of host_metrics will not be considered for purposes of managed host counting\n - `unique_managed_hosts`: Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes\n* `CLEANUP_HOST_METRICS_LAST_TS`: (datetime)\n* `HOST_METRIC_SUMMARY_TASK_LAST_TS`: (datetime)\n* `AWX_CLEANUP_PATHS`: Enable or Disable TMP Dir cleanup (boolean)\n* `AWX_REQUEST_PROFILE`: Debug web request python timing (boolean)\n* `DEFAULT_CONTAINER_RUN_OPTIONS`: List of options to pass to podman run example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug'] (list)\n* `RECEPTOR_RELEASE_WORK`: Release receptor work (boolean)\n* `SESSION_COOKIE_AGE`: Number of seconds that a user is inactive before they will need to login again. (integer)\n* `SESSIONS_PER_USER`: Maximum number of simultaneous logged in sessions a user may have. To disable enter -1. (integer)\n* `DISABLE_LOCAL_AUTH`: Controls whether users are prevented from using the built-in authentication system. You probably want to do this if you are using an LDAP or SAML integration. (boolean)
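Referring back to `API_400_ERROR_LOG_FORMAT` above, a format string using the documented `{<variable name>}` substitution style might look like this (the exact wording is only an illustration, shown as it would appear in a settings payload):

```json
{
    "API_400_ERROR_LOG_FORMAT": "status {status_code} received by user {user_name} attempting to access {url_path} from {remote_addr}"
}
```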
\n* `AUTH_BASIC_ENABLED`: Enable HTTP Basic Auth for the API Browser. (boolean)\n* `OAUTH2_PROVIDER`: Dictionary for customizing OAuth 2 timeouts, available items are `ACCESS_TOKEN_EXPIRE_SECONDS`, the duration of access tokens in the number of seconds, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, the duration of authorization codes in the number of seconds, and `REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after expired access tokens, in the number of seconds. (nested object)\n* `ALLOW_OAUTH2_FOR_EXTERNAL_USERS`: For security reasons, users from external auth providers (LDAP, SAML, SSO, Radius, and others) are not allowed to create OAuth2 tokens. To change this behavior, enable this setting. Existing tokens will not be deleted when this setting is toggled off. (boolean)\n* `LOGIN_REDIRECT_OVERRIDE`: URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page. (string)\n* `ALLOW_METRICS_FOR_ANONYMOUS_USERS`: If true, anonymous users are allowed to poll metrics. (boolean)\n* `PENDO_TRACKING_STATE`: Enable or Disable User Analytics Tracking. (choice)\n - `off`: Off\n - `anonymous`: Anonymous\n - `detailed`: Detailed\n* `CUSTOM_LOGIN_INFO`: If needed, you can add specific information (such as a legal notice or a disclaimer) to a text box in the login modal using this setting. Any content added must be in plain text or an HTML fragment, as other markup languages are not supported. (string)\n* `CUSTOM_LOGO`: To set up a custom logo, provide a file that you create. For the custom logo to look its best, use a .png file with a transparent background. GIF, PNG and JPEG formats are supported. (string)\n* `MAX_UI_JOB_EVENTS`: Maximum number of job events for the UI to retrieve within a single request. (integer)\n* `UI_LIVE_UPDATES_ENABLED`: If disabled, the page will not refresh when events are received. Reloading the page will be required to get the latest details. (boolean)\n* `AUTHENTICATION_BACKENDS`: List of authentication backends that are enabled based on license features and other authentication settings. (list)\n* `SOCIAL_AUTH_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_USER_FIELDS`: When set to an empty list `[]`, this setting prevents new user accounts from being created. Only users who have previously logged in using social auth or have a user account with a matching email address will be able to login. (list)\n* `SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL`: Enabling this setting will tell social auth to use the full Email as username instead of the full name (boolean)\n* `AUTH_LDAP_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax.
(string)\n* `AUTH_LDAP_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)
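As a sketch of how the LDAP search and attribute-map settings above fit together (the base DN, filter, and attribute names below are placeholder assumptions for an Active Directory-style server, not defaults from this API):

```json
{
    "AUTH_LDAP_SERVER_URI": "ldaps://ldap.example.com:636",
    "AUTH_LDAP_BIND_DN": "CN=Service Account,OU=Users,DC=example,DC=com",
    "AUTH_LDAP_USER_SEARCH": [
        "OU=Users,DC=example,DC=com",
        "SCOPE_SUBTREE",
        "(sAMAccountName=%(user)s)"
    ],
    "AUTH_LDAP_USER_ATTR_MAP": {
        "first_name": "givenName",
        "last_name": "sn",
        "email": "mail"
    }
}
```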
\n* `AUTH_LDAP_1_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_1_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_1_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_1_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_1_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_1_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_1_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_1_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_1_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_1_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_1_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_1_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_1_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_1_USER_FLAGS_BY_GROUP`: Retrieve users from a given group.
At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_1_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_1_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_2_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_2_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_2_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_2_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_2_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_2_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_2_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_2_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_2_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_2_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_2_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. 
(nested object)\n* `AUTH_LDAP_2_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_2_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_2_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_2_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_2_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_3_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_3_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_3_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_3_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_3_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_3_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_3_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_3_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_3_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_3_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_3_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_3_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_3_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_3_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_3_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_3_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_4_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_4_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_4_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_4_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_4_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_4_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_4_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_4_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. 
The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_4_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_4_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_4_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_4_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_4_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_4_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_4_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_4_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_5_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_5_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_5_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_5_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_5_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_5_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). 
If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_5_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_5_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_5_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_5_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_5_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_5_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_5_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_5_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_5_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_5_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `RADIUS_SERVER`: Hostname/IP of RADIUS server. RADIUS authentication is disabled if this setting is empty. (string)\n* `RADIUS_PORT`: Port of RADIUS server. (integer)\n* `RADIUS_SECRET`: Shared secret for authenticating to RADIUS server. (string)\n* `TACACSPLUS_HOST`: Hostname of TACACS+ server. (string)\n* `TACACSPLUS_PORT`: Port number of TACACS+ server. (integer)\n* `TACACSPLUS_SECRET`: Shared secret for authenticating to TACACS+ server. (string)\n* `TACACSPLUS_SESSION_TIMEOUT`: TACACS+ session timeout value in seconds, 0 disables timeout. (integer)\n* `TACACSPLUS_AUTH_PROTOCOL`: Choose the authentication protocol used by TACACS+ client. (choice)\n - `ascii`\n - `pap`\n* `TACACSPLUS_REM_ADDR`: Enable the client address sending by TACACS+ client. 
(boolean)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_KEY`: The OAuth2 key from your web application. (string)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET`: The OAuth2 secret from your web application. (string)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS`: Update this setting to restrict the domains that are allowed to login using Google OAuth2. (list)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS`: Extra arguments for Google OAuth2 login. You can restrict it to only allow a single domain to authenticate, even if the user is logged in with multiple Google accounts. Refer to the documentation for more detail. (nested object)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_KEY`: The OAuth2 key (Client ID) from your GitHub developer application. (string)\n* `SOCIAL_AUTH_GITHUB_SECRET`: The OAuth2 secret (Client Secret) from your GitHub developer application. (string)\n* `SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ORG_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_NAME`: The name of your GitHub organization, as used in your organization's URL: https://github.com/<yourorg>/. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_TEAM_CALLBACK_URL`: Create an organization-owned application at https://github.com/organizations/<yourorg>/settings/applications and obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this URL as the callback URL for your application. (string)
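Referring back to `SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS` above, restricting sign-in to a single Google Workspace domain might look like the following (the domain is a placeholder; `hd` is the hosted-domain argument accepted by Google's OAuth2 endpoint):

```json
{
    "SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS": {
        "hd": "example.com"
    }
}
```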
\n* `SOCIAL_AUTH_GITHUB_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_ID`: Find the numeric team ID using the GitHub API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_URL`: The URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/. Refer to GitHub Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to GitHub Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise developer application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise developer application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL`: The URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/. Refer to GitHub Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to GitHub Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME`: The name of your GitHub Enterprise organization, as used in your organization's URL: https://github.com/<yourorg>/. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation.
(nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_CALLBACK_URL`: Create an organization-owned application at https://github.com/organizations/<yourorg>/settings/applications and obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this URL as the callback URL for your application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL`: The URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/. Refer to GitHub Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to GitHub Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID`: Find the numeric team ID using the GitHub Enterprise API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_KEY`: The OAuth2 key (Client ID) from your Azure AD application. (string)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET`: The OAuth2 secret (Client Secret) from your Azure AD application. (string)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_OIDC_KEY`: The OIDC key (Client ID) from your IDP. (string)\n* `SOCIAL_AUTH_OIDC_SECRET`: The OIDC secret (Client Secret) from your IDP. (string)\n* `SOCIAL_AUTH_OIDC_OIDC_ENDPOINT`: The URL for your OIDC provider including the path up to /.well-known/openid-configuration (string)\n* `SOCIAL_AUTH_OIDC_VERIFY_SSL`: Verify the OIDC provider ssl certificate. (boolean)\n* `SAML_AUTO_CREATE_OBJECTS`: When enabled (the default), mapped Organizations and Teams will be created automatically on successful SAML login. (boolean)\n* `SOCIAL_AUTH_SAML_CALLBACK_URL`: Register the service as a service provider (SP) with each identity provider (IdP) you have configured. Provide your SP Entity ID and this ACS URL for your application.
(string)\n* `SOCIAL_AUTH_SAML_METADATA_URL`: If your identity provider (IdP) allows uploading an XML metadata file, you can download one from this URL. (string)\n* `SOCIAL_AUTH_SAML_SP_ENTITY_ID`: The application-defined unique identifier used as the audience of the SAML service provider (SP) configuration. This is usually the URL for the service. (string)\n* `SOCIAL_AUTH_SAML_SP_PUBLIC_CERT`: Create a keypair to use as a service provider (SP) and include the certificate content here. (string)\n* `SOCIAL_AUTH_SAML_SP_PRIVATE_KEY`: Create a keypair to use as a service provider (SP) and include the private key content here. (string)\n* `SOCIAL_AUTH_SAML_ORG_INFO`: Provide the URL, display name, and the name of your app. Refer to the documentation for example syntax. (nested object)\n* `SOCIAL_AUTH_SAML_TECHNICAL_CONTACT`: Provide the name and email address of the technical contact for your service provider. Refer to the documentation for example syntax. (nested object)\n* `SOCIAL_AUTH_SAML_SUPPORT_CONTACT`: Provide the name and email address of the support contact for your service provider. Refer to the documentation for example syntax. (nested object)\n* `SOCIAL_AUTH_SAML_ENABLED_IDPS`: Configure the Entity ID, SSO URL and certificate for each identity provider (IdP) in use. Multiple SAML IdPs are supported. Some IdPs may provide user data using attribute names that differ from the default OIDs. Attribute names may be overridden for each IdP. Refer to the Ansible documentation for additional details and syntax. (nested object)\n* `SOCIAL_AUTH_SAML_SECURITY_CONFIG`: A dict of key value pairs that are passed to the underlying python-saml security setting https://github.com/onelogin/python-saml#settings (nested object)\n* `SOCIAL_AUTH_SAML_SP_EXTRA`: A dict of key value pairs to be passed to the underlying python-saml Service Provider configuration setting. (nested object)\n* `SOCIAL_AUTH_SAML_EXTRA_DATA`: A list of tuples that maps IDP attributes to extra_attributes. Each attribute will be a list of values, even if only 1 value. (list)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_SAML_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_ATTR`: Used to translate user organization membership. (nested object)\n* `SOCIAL_AUTH_SAML_TEAM_ATTR`: Used to translate user team membership. (nested object)\n* `SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR`: Used to map super users and system auditors from SAML. (nested object)\n* `LOCAL_PASSWORD_MIN_LENGTH`: Minimum number of characters required in a local password. 0 means no minimum (integer)\n* `LOCAL_PASSWORD_MIN_DIGITS`: Minimum number of digit characters required in a local password. 0 means no minimum (integer)\n* `LOCAL_PASSWORD_MIN_UPPER`: Minimum number of uppercase characters required in a local password. 0 means no minimum (integer)\n* `LOCAL_PASSWORD_MIN_SPECIAL`: Minimum number of special characters required in a local password. 0 means no minimum (integer)\n\n\n\n\n\n# Update a Setting:\n\nMake a PUT or PATCH request to this resource to update this\nsetting. 
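For instance, a PATCH that changes a single field might send a body like this (the field and value are only an illustration, since any modifiable field listed below can be used):

```json
{
    "ACTIVITY_STREAM_ENABLED": false
}
```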
The following fields may be modified:\n\n\n* `ACTIVITY_STREAM_ENABLED`: Enable capturing activity for the activity stream. (boolean, required)\n* `ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC`: Enable capturing activity for the activity stream when running inventory sync. (boolean, required)\n* `ORG_ADMINS_CAN_SEE_ALL_USERS`: Controls whether any Organization Admin can view all users and teams, even those not associated with their Organization. (boolean, required)\n* `MANAGE_ORGANIZATION_AUTH`: Controls whether any Organization Admin has the privileges to create and manage users and teams. You may want to disable this ability if you are using an LDAP or SAML integration. (boolean, required)\n* `TOWER_URL_BASE`: This setting is used by services like notifications to render a valid url to the service. (string, required)\n* `REMOTE_HOST_HEADERS`: HTTP headers and meta keys to search to determine remote host name or IP. Add additional items to this list, such as "HTTP_X_FORWARDED_FOR", if behind a reverse proxy. See the "Proxy Support" section of the AAP Installation guide for more details. (list, required)\n* `PROXY_IP_ALLOWED_LIST`: If the service is behind a reverse proxy/load balancer, use this setting to configure the proxy IP addresses from which the service should trust custom REMOTE_HOST_HEADERS header values. If this setting is an empty list (the default), the headers specified by REMOTE_HOST_HEADERS will be trusted unconditionally. (list, required)\n* `CSRF_TRUSTED_ORIGINS`: If the service is behind a reverse proxy/load balancer, use this setting to configure the schema://addresses from which the service should trust Origin header values. (list, default=`[]`)\n\n* `REDHAT_USERNAME`: This username is used to send data to Automation Analytics (string, default=`\"\"`)\n* `REDHAT_PASSWORD`: This password is used to send data to Automation Analytics (string, default=`\"\"`)\n* `SUBSCRIPTIONS_USERNAME`: This username is used to retrieve subscription and content information (string, default=`\"\"`)\n* `SUBSCRIPTIONS_PASSWORD`: This password is used to retrieve subscription and content information (string, default=`\"\"`)\n* `AUTOMATION_ANALYTICS_URL`: This setting is used to configure the upload URL for data collection for Automation Analytics. (string, default=`\"https://example.com\"`)\n\n\n\n* `DEFAULT_EXECUTION_ENVIRONMENT`: The Execution Environment to be used when one has not been configured for a job template. (field, default=`None`)\n* `CUSTOM_VENV_PATHS`: Paths where Tower will look for custom virtual environments (in addition to /var/lib/awx/venv/). Enter one path per line. (list, default=`[]`)\n* `AD_HOC_COMMANDS`: List of modules allowed to be used by ad-hoc jobs. (list, default=`['command', 'shell', 'yum', 'apt', 'apt_key', 'apt_repository', 'apt_rpm', 'service', 'group', 'user', 'mount', 'ping', 'selinux', 'setup', 'win_ping', 'win_service', 'win_updates', 'win_group', 'win_user']`)\n* `ALLOW_JINJA_IN_EXTRA_VARS`: Ansible allows variable substitution via the Jinja2 templating language for --extra-vars. This poses a potential security risk where users with the ability to specify extra vars at job launch time can use Jinja2 templates to run arbitrary Python. It is recommended that this value be set to "template" or "never".
(choice, required)\n - `always`: Always\n - `never`: Never\n - `template`: Only On Job Template Definitions (default)\n* `AWX_ISOLATION_BASE_PATH`: The directory in which the service will create new temporary directories for job execution and isolation (such as credential files). (string, required)\n* `AWX_ISOLATION_SHOW_PATHS`: List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. Volumes will be mounted from the execution node to the container. The supported format is HOST-DIR[:CONTAINER-DIR[:OPTIONS]]. (list, default=`[]`)\n* `AWX_TASK_ENV`: Additional environment variables set for playbook runs, inventory updates, project updates, and notification sending. (nested object, default=`{}`)\n* `AWX_RUNNER_KEEPALIVE_SECONDS`: Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open. (integer, required)\n* `GALAXY_TASK_ENV`: Additional environment variables set for invocations of ansible-galaxy within project updates. Useful if you must use a proxy server for ansible-galaxy but not git. (nested object, required)\n* `INSIGHTS_TRACKING_STATE`: Enables the service to gather data on automation and send it to Automation Analytics. (boolean, default=`False`)\n* `PROJECT_UPDATE_VVV`: Adds the CLI -vvv flag to ansible-playbook runs of project_update.yml used for project updates. (boolean, required)\n* `AWX_ROLES_ENABLED`: Allows roles to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean, default=`True`)\n* `AWX_COLLECTIONS_ENABLED`: Allows collections to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean, default=`True`)\n* `AWX_SHOW_PLAYBOOK_LINKS`: Follow symbolic links when scanning for playbooks. Be aware that setting this to True can lead to infinite recursion if a link points to a parent directory of itself. (boolean, default=`False`)\n* `AWX_MOUNT_ISOLATED_PATHS_ON_K8S`: Expose paths via hostPath for the Pods created by a Container Group. HostPath volumes present many security risks, and it is a best practice to avoid the use of HostPaths when possible. (boolean, default=`False`)\n* `GALAXY_IGNORE_CERTS`: If set to true, certificate validation will not be done when installing content from any Galaxy server. (boolean, default=`False`)\n* `STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display before requiring the output be downloaded. (integer, required)\n* `EVENT_STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display for a single job or ad hoc command event. `stdout` will end with `\u2026` when truncated. (integer, required)\n* `MAX_WEBSOCKET_EVENT_RATE`: Maximum number of messages to update the UI live job output with per second. Value of 0 means no limit. (integer, default=`30`)\n* `SCHEDULE_MAX_JOBS`: Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created. (integer, required)\n* `AWX_ANSIBLE_CALLBACK_PLUGINS`: List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line. (list, default=`[]`)\n* `DEFAULT_JOB_TIMEOUT`: Maximum time in seconds to allow jobs to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual job template will override this. (integer, default=`0`)\n* `DEFAULT_JOB_IDLE_TIMEOUT`: If no output is detected from ansible in this number of seconds the execution will be terminated. 
Use value of 0 to indicate that no idle timeout should be imposed. (integer, default=`0`)\n* `DEFAULT_INVENTORY_UPDATE_TIMEOUT`: Maximum time in seconds to allow inventory updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual inventory source will override this. (integer, default=`0`)\n* `DEFAULT_PROJECT_UPDATE_TIMEOUT`: Maximum time in seconds to allow project updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual project will override this. (integer, default=`0`)\n* `ANSIBLE_FACT_CACHE_TIMEOUT`: Maximum time, in seconds, that stored Ansible facts are considered valid since the last time they were modified. Only valid, non-stale facts will be accessible by a playbook. Note that this does not influence the deletion of ansible_facts from the database. Use a value of 0 to indicate that no timeout should be imposed. (integer, default=`0`)\n* `MAX_FORKS`: Saving a Job Template with more than this number of forks will result in an error. When set to 0, no limit is applied. (integer, default=`200`)\n* `LOG_AGGREGATOR_HOST`: Hostname/IP to which external logs will be sent. (string, default=`\"\"`)\n* `LOG_AGGREGATOR_PORT`: Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator). (integer, default=`None`)\n* `LOG_AGGREGATOR_TYPE`: Format messages for the chosen log aggregator. (choice)\n - `None`: --------- (default)\n - `logstash`\n - `splunk`\n - `loggly`\n - `sumologic`\n - `other`\n* `LOG_AGGREGATOR_USERNAME`: Username for external log aggregator (if required; HTTP/s only). (string, default=`\"\"`)\n* `LOG_AGGREGATOR_PASSWORD`: Password or authentication token for external log aggregator (if required; HTTP/s only). (string, default=`\"\"`)\n* `LOG_AGGREGATOR_LOGGERS`: List of loggers that will send HTTP logs to the collector; these can include any or all of: \nawx - service logs\nactivity_stream - activity stream records\njob_events - callback data from Ansible job events\nsystem_tracking - facts gathered from scan jobs\nbroadcast_websocket - errors pertaining to websockets broadcast metrics\n (list, default=`['awx', 'activity_stream', 'job_events', 'system_tracking', 'broadcast_websocket']`)\n* `LOG_AGGREGATOR_INDIVIDUAL_FACTS`: If set, system tracking facts will be sent for each package, service, or other item found in a scan, allowing for greater search query granularity. If unset, facts will be sent as a single dictionary, allowing for greater efficiency in fact processing. (boolean, default=`False`)\n* `LOG_AGGREGATOR_ENABLED`: Enable sending logs to external log aggregator. (boolean, default=`False`)\n* `LOG_AGGREGATOR_TOWER_UUID`: Useful to uniquely identify instances. (string, default=`\"\"`)\n* `LOG_AGGREGATOR_PROTOCOL`: Protocol used to communicate with log aggregator. HTTPS/HTTP assumes HTTPS unless http:// is explicitly used in the Logging Aggregator hostname. (choice)\n - `https`: HTTPS/HTTP (default)\n - `tcp`: TCP\n - `udp`: UDP\n* `LOG_AGGREGATOR_TCP_TIMEOUT`: Number of seconds for a TCP connection to external log aggregator to time out. Applies to HTTPS and TCP log aggregator protocols. (integer, default=`5`)\n* `LOG_AGGREGATOR_VERIFY_CERT`: Flag to enable or disable certificate verification when LOG_AGGREGATOR_PROTOCOL is "https". If enabled, the log handler will verify the certificate sent by the external log aggregator before establishing a connection. 
(boolean, default=`True`)\n* `LOG_AGGREGATOR_LEVEL`: Level threshold used by the log handler. Severities from lowest to highest are DEBUG, INFO, WARNING, ERROR, CRITICAL. Messages less severe than the threshold will be ignored by the log handler. (messages under category awx.analytics ignore this setting) (choice)\n - `DEBUG`\n - `INFO` (default)\n - `WARNING`\n - `ERROR`\n - `CRITICAL`\n* `LOG_AGGREGATOR_ACTION_QUEUE_SIZE`: Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5). (integer, default=`131072`)\n* `LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB`: Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH. (integer, default=`1`)\n* `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH`: Location to persist logs that should be retried after an outage of the external log aggregator (defaults to /var/lib/awx). Equivalent to the rsyslogd queue.spoolDirectory setting. (string, default=`\"/var/lib/awx\"`)\n* `LOG_AGGREGATOR_RSYSLOGD_DEBUG`: Enables high-verbosity debugging for rsyslogd. Useful for debugging connection issues for external log aggregation. (boolean, default=`False`)\n* `API_400_ERROR_LOG_FORMAT`: The format of logged messages when an API 4XX error occurs; the following variables will be substituted: \nstatus_code - The HTTP status code of the error\nuser_name - The user name attempting to use the API\nurl_path - The URL path to the API endpoint called\nremote_addr - The remote address seen for the user\nerror - The error set by the API endpoint\nVariables need to be in the format {<variable name>}. (string, default=`\"status {status_code} received by user {user_name} attempting to access {url_path} from {remote_addr}\"`)\n* `AUTOMATION_ANALYTICS_LAST_GATHER`: (datetime, required)\n* `AUTOMATION_ANALYTICS_LAST_ENTRIES`: (string, default=`\"\"`)\n* `AUTOMATION_ANALYTICS_GATHER_INTERVAL`: Interval (in seconds) between data gathering. (integer, default=`14400`)\n\n* `BULK_JOB_MAX_LAUNCH`: Max jobs to allow bulk jobs to launch (integer, default=`100`)\n* `BULK_HOST_MAX_CREATE`: Max number of hosts to allow to be created in a single bulk action (integer, default=`100`)\n* `UI_NEXT`: Enable preview of new user interface. (boolean, default=`True`)\n* `SUBSCRIPTION_USAGE_MODEL`: (choice)\n - `\"\"`: Default model for AWX - no subscription. 
Deletion of host_metrics will not be considered for purposes of managed host counting (default)\n - `unique_managed_hosts`: Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes\n* `CLEANUP_HOST_METRICS_LAST_TS`: (datetime, required)\n* `HOST_METRIC_SUMMARY_TASK_LAST_TS`: (datetime, required)\n* `AWX_CLEANUP_PATHS`: Enable or Disable TMP Dir cleanup (boolean, default=`True`)\n* `AWX_REQUEST_PROFILE`: Debug web request python timing (boolean, default=`False`)\n* `DEFAULT_CONTAINER_RUN_OPTIONS`: List of options to pass to podman run example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug'] (list, default=`['--network', 'slirp4netns:enable_ipv6=true']`)\n* `RECEPTOR_RELEASE_WORK`: Release receptor work (boolean, default=`True`)\n* `SESSION_COOKIE_AGE`: Number of seconds that a user is inactive before they will need to login again. (integer, required)\n* `SESSIONS_PER_USER`: Maximum number of simultaneous logged in sessions a user may have. To disable enter -1. (integer, required)\n* `DISABLE_LOCAL_AUTH`: Controls whether users are prevented from using the built-in authentication system. You probably want to do this if you are using an LDAP or SAML integration. (boolean, required)\n* `AUTH_BASIC_ENABLED`: Enable HTTP Basic Auth for the API Browser. (boolean, required)\n* `OAUTH2_PROVIDER`: Dictionary for customizing OAuth 2 timeouts, available items are `ACCESS_TOKEN_EXPIRE_SECONDS`, the duration of access tokens in the number of seconds, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, the duration of authorization codes in the number of seconds, and `REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after expired access tokens, in the number of seconds. (nested object, default=`{'ACCESS_TOKEN_EXPIRE_SECONDS': 31536000000, 'AUTHORIZATION_CODE_EXPIRE_SECONDS': 600, 'REFRESH_TOKEN_EXPIRE_SECONDS': 2628000}`)\n* `ALLOW_OAUTH2_FOR_EXTERNAL_USERS`: For security reasons, users from external auth providers (LDAP, SAML, SSO, Radius, and others) are not allowed to create OAuth2 tokens. To change this behavior, enable this setting. Existing tokens will not be deleted when this setting is toggled off. (boolean, default=`False`)\n* `LOGIN_REDIRECT_OVERRIDE`: URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page. (string, default=`\"\"`)\n* `ALLOW_METRICS_FOR_ANONYMOUS_USERS`: If true, anonymous users are allowed to poll metrics. (boolean, default=`False`)\n\n* `CUSTOM_LOGIN_INFO`: If needed, you can add specific information (such as a legal notice or a disclaimer) to a text box in the login modal using this setting. Any content added must be in plain text or an HTML fragment, as other markup languages are not supported. (string, default=`\"\"`)\n* `CUSTOM_LOGO`: To set up a custom logo, provide a file that you create. For the custom logo to look its best, use a .png file with a transparent background. GIF, PNG and JPEG formats are supported. (string, default=`\"\"`)\n* `MAX_UI_JOB_EVENTS`: Maximum number of job events for the UI to retrieve within a single request. (integer, required)\n* `UI_LIVE_UPDATES_ENABLED`: If disabled, the page will not refresh when events are received. Reloading the page will be required to get the latest details. (boolean, required)\n\n* `SOCIAL_AUTH_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. 
This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_USER_FIELDS`: When set to an empty list `[]`, this setting prevents new user accounts from being created. Only users who have previously logged in using social auth or have a user account with a matching email address will be able to login. (list, default=`None`)\n* `SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL`: Enabling this setting will tell social auth to use the full Email as username instead of the full name (boolean, default=`False`)\n* `AUTH_LDAP_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_1_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_1_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_1_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_1_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_1_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_1_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_1_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. 
This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_1_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_1_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_1_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_1_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_1_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_1_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_1_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_1_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_1_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_2_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_2_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_2_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_2_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. 
(boolean, default=`False`)\n* `AUTH_LDAP_2_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_2_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_2_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_2_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_2_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_2_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_2_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_2_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_2_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_2_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_2_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_2_TEAM_MAP`: Mapping between team members (users) and LDAP groups. 
Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_3_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_3_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_3_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_3_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_3_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_3_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_3_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_3_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_3_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_3_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_3_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_3_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. 
Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_3_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_3_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_3_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_3_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_4_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_4_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_4_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_4_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_4_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_4_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_4_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_4_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_4_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_4_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_4_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_4_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_4_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_4_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_4_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_4_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_5_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_5_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_5_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_5_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_5_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_5_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_5_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. 
This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_5_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_5_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_5_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_5_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_5_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_5_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_5_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_5_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_5_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `RADIUS_SERVER`: Hostname/IP of RADIUS server. RADIUS authentication is disabled if this setting is empty. (string, default=`\"\"`)\n* `RADIUS_PORT`: Port of RADIUS server. (integer, default=`1812`)\n* `RADIUS_SECRET`: Shared secret for authenticating to RADIUS server. (string, default=`\"\"`)\n* `TACACSPLUS_HOST`: Hostname of TACACS+ server. (string, default=`\"\"`)\n* `TACACSPLUS_PORT`: Port number of TACACS+ server. (integer, default=`49`)\n* `TACACSPLUS_SECRET`: Shared secret for authenticating to TACACS+ server. (string, default=`\"\"`)\n* `TACACSPLUS_SESSION_TIMEOUT`: TACACS+ session timeout value in seconds, 0 disables timeout. (integer, default=`5`)\n* `TACACSPLUS_AUTH_PROTOCOL`: Choose the authentication protocol used by TACACS+ client. 
(choice)\n - `ascii` (default)\n - `pap`\n* `TACACSPLUS_REM_ADDR`: Enable sending of the client address by the TACACS+ client. (boolean, default=`False`)\n\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_KEY`: The OAuth2 key from your web application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET`: The OAuth2 secret from your web application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS`: Update this setting to restrict the domains that are allowed to log in using Google OAuth2. (list, default=`[]`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS`: Extra arguments for Google OAuth2 login. You can restrict it to only allow a single domain to authenticate, even if the user is logged in with multiple Google accounts. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_KEY`: The OAuth2 key (Client ID) from your GitHub developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_SECRET`: The OAuth2 secret (Client Secret) from your GitHub developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORG_NAME`: The name of your GitHub organization, as used in your organization's URL: https://github.com/<yourorg>/. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_TEAM_ID`: Find the numeric team ID using the GitHub API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. 
(string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME`: The name of your GitHub Enterprise organization, as used in your organization's URL: https://github.com/<yourorg>/. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. 
Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID`: Find the numeric team ID using the Github Enterprise API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_KEY`: The OAuth2 key (Client ID) from your Azure AD application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET`: The OAuth2 secret (Client Secret) from your Azure AD application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_OIDC_KEY`: The OIDC key (Client ID) from your IDP. (string, default=`\"\"`)\n* `SOCIAL_AUTH_OIDC_SECRET`: The OIDC secret (Client Secret) from your IDP. (string, default=`\"\"`)\n* `SOCIAL_AUTH_OIDC_OIDC_ENDPOINT`: The URL for your OIDC provider including the path up to /.well-known/openid-configuration (string, default=`\"\"`)\n* `SOCIAL_AUTH_OIDC_VERIFY_SSL`: Verify the OIDC provider ssl certificate. (boolean, default=`True`)\n* `SAML_AUTO_CREATE_OBJECTS`: When enabled (the default), mapped Organizations and Teams will be created automatically on successful SAML login. (boolean, default=`True`)\n\n\n* `SOCIAL_AUTH_SAML_SP_ENTITY_ID`: The application-defined unique identifier used as the audience of the SAML service provider (SP) configuration. This is usually the URL for the service. (string, default=`\"\"`)\n* `SOCIAL_AUTH_SAML_SP_PUBLIC_CERT`: Create a keypair to use as a service provider (SP) and include the certificate content here. (string, required)\n* `SOCIAL_AUTH_SAML_SP_PRIVATE_KEY`: Create a keypair to use as a service provider (SP) and include the private key content here. (string, required)\n* `SOCIAL_AUTH_SAML_ORG_INFO`: Provide the URL, display name, and the name of your app. Refer to the documentation for example syntax. (nested object, required)\n* `SOCIAL_AUTH_SAML_TECHNICAL_CONTACT`: Provide the name and email address of the technical contact for your service provider. Refer to the documentation for example syntax. (nested object, required)\n* `SOCIAL_AUTH_SAML_SUPPORT_CONTACT`: Provide the name and email address of the support contact for your service provider. 
Refer to the documentation for example syntax. (nested object, required)\n* `SOCIAL_AUTH_SAML_ENABLED_IDPS`: Configure the Entity ID, SSO URL and certificate for each identity provider (IdP) in use. Multiple SAML IdPs are supported. Some IdPs may provide user data using attribute names that differ from the default OIDs. Attribute names may be overridden for each IdP. Refer to the Ansible documentation for additional details and syntax. (nested object, default=`{}`)\n* `SOCIAL_AUTH_SAML_SECURITY_CONFIG`: A dict of key value pairs that are passed to the underlying python-saml security setting https://github.com/onelogin/python-saml#settings (nested object, default=`{'requestedAuthnContext': False}`)\n* `SOCIAL_AUTH_SAML_SP_EXTRA`: A dict of key value pairs to be passed to the underlying python-saml Service Provider configuration setting. (nested object, default=`None`)\n* `SOCIAL_AUTH_SAML_EXTRA_DATA`: A list of tuples that maps IDP attributes to extra_attributes. Each attribute will be a list of values, even if only 1 value. (list, default=`None`)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_SAML_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_ATTR`: Used to translate user organization membership. (nested object, default=`{}`)\n* `SOCIAL_AUTH_SAML_TEAM_ATTR`: Used to translate user team membership. (nested object, default=`{}`)\n* `SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR`: Used to map super users and system auditors from SAML. (nested object, default=`{}`)\n* `LOCAL_PASSWORD_MIN_LENGTH`: Minimum number of characters required in a local password. 0 means no minimum (integer, default=`0`)\n* `LOCAL_PASSWORD_MIN_DIGITS`: Minimum number of digit characters required in a local password. 0 means no minimum (integer, default=`0`)\n* `LOCAL_PASSWORD_MIN_UPPER`: Minimum number of uppercase characters required in a local password. 0 means no minimum (integer, default=`0`)\n* `LOCAL_PASSWORD_MIN_SPECIAL`: Minimum number of special characters required in a local password. 
0 means no minimum (integer, default=`0`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Setting:\n\nMake a DELETE request to this resource to delete this setting.",
+                "operationId": "api_settings_read",
+                "parameters": [],
+                "produces": [
+                    "application/json"
+                ],
+                "responses": {
+                    "200": {
+                        "description": "",
+                        "examples": {
+                            "application/json": {
+                                "SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL": "",
+                                "SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL": "https://towerhost/sso/complete/github-enterprise/",
+                                "SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY": "",
+                                "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP": null,
+                                "SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET": "",
+                                "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP": null,
+                                "SOCIAL_AUTH_GITHUB_ENTERPRISE_URL": ""
+                            }
+                        },
+                        "schema": {
+                            "$ref": "#/definitions/SettingSingleton"
+                        }
+                    }
+                },
+                "summary": "Make GET request to this resource to retrieve a single setting",
+                "tags": [
+                    "api"
+                ]
+            },
+            "parameters": [
+                {
+                    "in": "path",
+                    "name": "version",
+                    "required": true,
+                    "type": "string"
+                },
+                {
+                    "in": "path",
+                    "name": "category_slug",
+                    "required": true,
+                    "type": "string"
+                }
+            ],
+            "patch": {
+                "description": "record containing the following fields:\n\n* `ACTIVITY_STREAM_ENABLED`: Enable capturing activity for the activity stream. (boolean)\n* `ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC`: Enable capturing activity for the activity stream when running inventory sync. (boolean)\n* `ORG_ADMINS_CAN_SEE_ALL_USERS`: Controls whether any Organization Admin can view all users and teams, even those not associated with their Organization. (boolean)\n* `MANAGE_ORGANIZATION_AUTH`: Controls whether any Organization Admin has the privileges to create and manage users and teams. You may want to disable this ability if you are using an LDAP or SAML integration. (boolean)\n* `TOWER_URL_BASE`: This setting is used by services like notifications to render a valid URL to the service. (string)\n* `REMOTE_HOST_HEADERS`: HTTP headers and meta keys to search to determine remote host name or IP. Add additional items to this list, such as "HTTP_X_FORWARDED_FOR", if behind a reverse proxy. See the "Proxy Support" section of the AAP Installation guide for more details. (list)\n* `PROXY_IP_ALLOWED_LIST`: If the service is behind a reverse proxy/load balancer, use this setting to configure the proxy IP addresses from which the service should trust custom REMOTE_HOST_HEADERS header values. If this setting is an empty list (the default), the headers specified by REMOTE_HOST_HEADERS will be trusted unconditionally. (list)\n* `CSRF_TRUSTED_ORIGINS`: If the service is behind a reverse proxy/load balancer, use this setting to configure the schema://addresses from which the service should trust Origin header values. (list)\n* `LICENSE`: The license controls which features and functionality are enabled. Use /api/v2/config/ to update or change the license. 
(nested object)\n* `REDHAT_USERNAME`: This username is used to send data to Automation Analytics (string)\n* `REDHAT_PASSWORD`: This password is used to send data to Automation Analytics (string)\n* `SUBSCRIPTIONS_USERNAME`: This username is used to retrieve subscription and content information (string)\n* `SUBSCRIPTIONS_PASSWORD`: This password is used to retrieve subscription and content information (string)\n* `AUTOMATION_ANALYTICS_URL`: This setting is used to configure the upload URL for data collection for Automation Analytics. (string)\n* `INSTALL_UUID`: (string)\n* `DEFAULT_CONTROL_PLANE_QUEUE_NAME`: (string)\n* `DEFAULT_EXECUTION_QUEUE_NAME`: (string)\n* `DEFAULT_EXECUTION_ENVIRONMENT`: The Execution Environment to be used when one has not been configured for a job template. (field)\n* `CUSTOM_VENV_PATHS`: Paths where Tower will look for custom virtual environments (in addition to /var/lib/awx/venv/). Enter one path per line. (list)\n* `AD_HOC_COMMANDS`: List of modules allowed to be used by ad-hoc jobs. (list)\n* `ALLOW_JINJA_IN_EXTRA_VARS`: Ansible allows variable substitution via the Jinja2 templating language for --extra-vars. This poses a potential security risk where users with the ability to specify extra vars at job launch time can use Jinja2 templates to run arbitrary Python. It is recommended that this value be set to "template" or "never". (choice)\n - `always`: Always\n - `never`: Never\n - `template`: Only On Job Template Definitions\n* `AWX_ISOLATION_BASE_PATH`: The directory in which the service will create new temporary directories for job execution and isolation (such as credential files). (string)\n* `AWX_ISOLATION_SHOW_PATHS`: List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. Volumes will be mounted from the execution node to the container. The supported format is HOST-DIR[:CONTAINER-DIR[:OPTIONS]]. (list)\n* `AWX_TASK_ENV`: Additional environment variables set for playbook runs, inventory updates, project updates, and notification sending. (nested object)\n* `AWX_RUNNER_KEEPALIVE_SECONDS`: Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open. (integer)\n* `GALAXY_TASK_ENV`: Additional environment variables set for invocations of ansible-galaxy within project updates. Useful if you must use a proxy server for ansible-galaxy but not git. (nested object)\n* `INSIGHTS_TRACKING_STATE`: Enables the service to gather data on automation and send it to Automation Analytics. (boolean)\n* `PROJECT_UPDATE_VVV`: Adds the CLI -vvv flag to ansible-playbook runs of project_update.yml used for project updates. (boolean)\n* `AWX_ROLES_ENABLED`: Allows roles to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean)\n* `AWX_COLLECTIONS_ENABLED`: Allows collections to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean)\n* `AWX_SHOW_PLAYBOOK_LINKS`: Follow symbolic links when scanning for playbooks. Be aware that setting this to True can lead to infinite recursion if a link points to a parent directory of itself. (boolean)\n* `AWX_MOUNT_ISOLATED_PATHS_ON_K8S`: Expose paths via hostPath for the Pods created by a Container Group. HostPath volumes present many security risks, and it is a best practice to avoid the use of HostPaths when possible. (boolean)\n* `GALAXY_IGNORE_CERTS`: If set to true, certificate validation will not be done when installing content from any Galaxy server.
(boolean)\n* `STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display before requiring the output be downloaded. (integer)\n* `EVENT_STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display for a single job or ad hoc command event. `stdout` will end with `\u2026` when truncated. (integer)\n* `MAX_WEBSOCKET_EVENT_RATE`: Maximum number of messages to update the UI live job output with per second. Value of 0 means no limit. (integer)\n* `SCHEDULE_MAX_JOBS`: Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created. (integer)\n* `AWX_ANSIBLE_CALLBACK_PLUGINS`: List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line. (list)\n* `DEFAULT_JOB_TIMEOUT`: Maximum time in seconds to allow jobs to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual job template will override this. (integer)\n* `DEFAULT_JOB_IDLE_TIMEOUT`: If no output is detected from ansible in this number of seconds the execution will be terminated. Use value of 0 to indicate that no idle timeout should be imposed. (integer)\n* `DEFAULT_INVENTORY_UPDATE_TIMEOUT`: Maximum time in seconds to allow inventory updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual inventory source will override this. (integer)\n* `DEFAULT_PROJECT_UPDATE_TIMEOUT`: Maximum time in seconds to allow project updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual project will override this. (integer)\n* `ANSIBLE_FACT_CACHE_TIMEOUT`: Maximum time, in seconds, that stored Ansible facts are considered valid since the last time they were modified. Only valid, non-stale, facts will be accessible by a playbook. Note, this does not influence the deletion of ansible_facts from the database. Use a value of 0 to indicate that no timeout should be imposed. (integer)\n* `MAX_FORKS`: Saving a Job Template with more than this number of forks will result in an error. When set to 0, no limit is applied. (integer)\n* `LOG_AGGREGATOR_HOST`: Hostname/IP where external logs will be sent to. (string)\n* `LOG_AGGREGATOR_PORT`: Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator). (integer)\n* `LOG_AGGREGATOR_TYPE`: Format messages for the chosen log aggregator. (choice)\n - `None`: ---------\n - `logstash`\n - `splunk`\n - `loggly`\n - `sumologic`\n - `other`\n* `LOG_AGGREGATOR_USERNAME`: Username for external log aggregator (if required; HTTP/s only). (string)\n* `LOG_AGGREGATOR_PASSWORD`: Password or authentication token for external log aggregator (if required; HTTP/s only). (string)\n* `LOG_AGGREGATOR_LOGGERS`: List of loggers that will send HTTP logs to the collector, these can include any or all of: \nawx - service logs\nactivity_stream - activity stream records\njob_events - callback data from Ansible job events\nsystem_tracking - facts gathered from scan jobs\nbroadcast_websocket - errors pertaining to websockets broadcast metrics\n (list)\n* `LOG_AGGREGATOR_INDIVIDUAL_FACTS`: If set, system tracking facts will be sent for each package, service, or other item found in a scan, allowing for greater search query granularity. If unset, facts will be sent as a single dictionary, allowing for greater efficiency in fact processing. (boolean)\n* `LOG_AGGREGATOR_ENABLED`: Enable sending logs to external log aggregator. 
(boolean)\n* `LOG_AGGREGATOR_TOWER_UUID`: Useful to uniquely identify instances. (string)\n* `LOG_AGGREGATOR_PROTOCOL`: Protocol used to communicate with log aggregator. HTTPS/HTTP assumes HTTPS unless http:// is explicitly used in the Logging Aggregator hostname. (choice)\n - `https`: HTTPS/HTTP\n - `tcp`: TCP\n - `udp`: UDP\n* `LOG_AGGREGATOR_TCP_TIMEOUT`: Number of seconds for a TCP connection to external log aggregator to timeout. Applies to HTTPS and TCP log aggregator protocols. (integer)\n* `LOG_AGGREGATOR_VERIFY_CERT`: Flag to control enable/disable of certificate verification when LOG_AGGREGATOR_PROTOCOL is "https". If enabled, the log handler will verify certificate sent by external log aggregator before establishing connection. (boolean)\n* `LOG_AGGREGATOR_LEVEL`: Level threshold used by log handler. Severities from lowest to highest are DEBUG, INFO, WARNING, ERROR, CRITICAL. Messages less severe than the threshold will be ignored by log handler. (messages under category awx.analytics ignore this setting) (choice)\n - `DEBUG`\n - `INFO`\n - `WARNING`\n - `ERROR`\n - `CRITICAL`\n* `LOG_AGGREGATOR_ACTION_QUEUE_SIZE`: Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5). (integer)\n* `LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB`: Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH. (integer)\n* `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH`: Location to persist logs that should be retried after an outage of the external log aggregator (defaults to /var/lib/awx). Equivalent to the rsyslogd queue.spoolDirectory setting. (string)\n* `LOG_AGGREGATOR_RSYSLOGD_DEBUG`: Enable high verbosity debugging for rsyslogd. Useful for debugging connection issues for external log aggregation. (boolean)\n* `API_400_ERROR_LOG_FORMAT`: The format of logged messages when an API 4XX error occurs, the following variables will be substituted: \nstatus_code - The HTTP status code of the error\nuser_name - The user name attempting to use the API\nurl_path - The URL path to the API endpoint called\nremote_addr - The remote address seen for the user\nerror - The error set by the api endpoint\nVariables need to be in the format {<variable name>}. (string)\n* `AUTOMATION_ANALYTICS_LAST_GATHER`: (datetime)\n* `AUTOMATION_ANALYTICS_LAST_ENTRIES`: (string)\n* `AUTOMATION_ANALYTICS_GATHER_INTERVAL`: Interval (in seconds) between data gathering. (integer)\n* `IS_K8S`: Indicates whether the instance is part of a kubernetes-based deployment. (boolean)\n* `BULK_JOB_MAX_LAUNCH`: Max jobs to allow bulk jobs to launch (integer)\n* `BULK_HOST_MAX_CREATE`: Max number of hosts to allow to be created in a single bulk action (integer)\n* `UI_NEXT`: Enable preview of new user interface. (boolean)\n* `SUBSCRIPTION_USAGE_MODEL`: (choice)\n - `\"\"`: Default model for AWX - no subscription.
Deletion of host_metrics will not be considered for purposes of managed host counting\n - `unique_managed_hosts`: Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes\n* `CLEANUP_HOST_METRICS_LAST_TS`: (datetime)\n* `HOST_METRIC_SUMMARY_TASK_LAST_TS`: (datetime)\n* `AWX_CLEANUP_PATHS`: Enable or Disable TMP Dir cleanup (boolean)\n* `AWX_REQUEST_PROFILE`: Debug web request python timing (boolean)\n* `DEFAULT_CONTAINER_RUN_OPTIONS`: List of options to pass to podman run example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug'] (list)\n* `RECEPTOR_RELEASE_WORK`: Release receptor work (boolean)\n* `SESSION_COOKIE_AGE`: Number of seconds that a user is inactive before they will need to login again. (integer)\n* `SESSIONS_PER_USER`: Maximum number of simultaneous logged in sessions a user may have. To disable enter -1. (integer)\n* `DISABLE_LOCAL_AUTH`: Controls whether users are prevented from using the built-in authentication system. You probably want to do this if you are using an LDAP or SAML integration. (boolean)\n* `AUTH_BASIC_ENABLED`: Enable HTTP Basic Auth for the API Browser. (boolean)\n* `OAUTH2_PROVIDER`: Dictionary for customizing OAuth 2 timeouts, available items are `ACCESS_TOKEN_EXPIRE_SECONDS`, the duration of access tokens in the number of seconds, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, the duration of authorization codes in the number of seconds, and `REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after expired access tokens, in the number of seconds. (nested object)\n* `ALLOW_OAUTH2_FOR_EXTERNAL_USERS`: For security reasons, users from external auth providers (LDAP, SAML, SSO, Radius, and others) are not allowed to create OAuth2 tokens. To change this behavior, enable this setting. Existing tokens will not be deleted when this setting is toggled off. (boolean)\n* `LOGIN_REDIRECT_OVERRIDE`: URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page. (string)\n* `ALLOW_METRICS_FOR_ANONYMOUS_USERS`: If true, anonymous users are allowed to poll metrics. (boolean)\n* `PENDO_TRACKING_STATE`: Enable or Disable User Analytics Tracking. (choice)\n - `off`: Off\n - `anonymous`: Anonymous\n - `detailed`: Detailed\n* `CUSTOM_LOGIN_INFO`: If needed, you can add specific information (such as a legal notice or a disclaimer) to a text box in the login modal using this setting. Any content added must be in plain text or an HTML fragment, as other markup languages are not supported. (string)\n* `CUSTOM_LOGO`: To set up a custom logo, provide a file that you create. For the custom logo to look its best, use a .png file with a transparent background. GIF, PNG and JPEG formats are supported. (string)\n* `MAX_UI_JOB_EVENTS`: Maximum number of job events for the UI to retrieve within a single request. (integer)\n* `UI_LIVE_UPDATES_ENABLED`: If disabled, the page will not refresh when events are received. Reloading the page will be required to get the latest details. (boolean)\n* `AUTHENTICATION_BACKENDS`: List of authentication backends that are enabled based on license features and other authentication settings. (list)\n* `SOCIAL_AUTH_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. 
(nested object)\n* `SOCIAL_AUTH_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_USER_FIELDS`: When set to an empty list `[]`, this setting prevents new user accounts from being created. Only users who have previously logged in using social auth or have a user account with a matching email address will be able to login. (list)\n* `SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL`: Enabling this setting will tell social auth to use the full Email as username instead of the full name (boolean)\n* `AUTH_LDAP_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. 
(nested object)\n* `AUTH_LDAP_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_1_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_1_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_1_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_1_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_1_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_1_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_1_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_1_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_1_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_1_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_1_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_1_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_1_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_1_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_1_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_1_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_2_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_2_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_2_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_2_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_2_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_2_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_2_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_2_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. 
The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_2_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_2_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_2_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_2_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_2_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_2_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_2_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_2_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_3_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_3_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_3_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_3_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_3_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_3_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). 
If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_3_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_3_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_3_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_3_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_3_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_3_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_3_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_3_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_3_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_3_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_4_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_4_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_4_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_4_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_4_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. 
LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_4_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_4_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_4_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_4_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_4_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_4_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_4_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_4_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_4_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_4_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_4_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_5_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. 
(string)\n* `AUTH_LDAP_5_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_5_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_5_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_5_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_5_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_5_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_5_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_5_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_5_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_5_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_5_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_5_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_5_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_5_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. 
Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_5_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `RADIUS_SERVER`: Hostname/IP of RADIUS server. RADIUS authentication is disabled if this setting is empty. (string)\n* `RADIUS_PORT`: Port of RADIUS server. (integer)\n* `RADIUS_SECRET`: Shared secret for authenticating to RADIUS server. (string)\n* `TACACSPLUS_HOST`: Hostname of TACACS+ server. (string)\n* `TACACSPLUS_PORT`: Port number of TACACS+ server. (integer)\n* `TACACSPLUS_SECRET`: Shared secret for authenticating to TACACS+ server. (string)\n* `TACACSPLUS_SESSION_TIMEOUT`: TACACS+ session timeout value in seconds, 0 disables timeout. (integer)\n* `TACACSPLUS_AUTH_PROTOCOL`: Choose the authentication protocol used by TACACS+ client. (choice)\n - `ascii`\n - `pap`\n* `TACACSPLUS_REM_ADDR`: Enable the client address sending by TACACS+ client. (boolean)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_KEY`: The OAuth2 key from your web application. (string)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET`: The OAuth2 secret from your web application. (string)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS`: Update this setting to restrict the domains that are allowed to login using Google OAuth2. (list)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS`: Extra arguments for Google OAuth2 login. You can restrict it to only allow a single domain to authenticate, even if the user is logged in with multiple Google accounts. Refer to the documentation for more detail. (nested object)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_KEY`: The OAuth2 key (Client ID) from your GitHub developer application. (string)\n* `SOCIAL_AUTH_GITHUB_SECRET`: The OAuth2 secret (Client Secret) from your GitHub developer application. (string)\n* `SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ORG_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub organization application.
(string)\n* `SOCIAL_AUTH_GITHUB_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_NAME`: The name of your GitHub organization, as used in your organization's URL: https://github.com/<yourorg>/. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_TEAM_CALLBACK_URL`: Create an organization-owned application at https://github.com/organizations/<yourorg>/settings/applications and obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this URL as the callback URL for your application. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_ID`: Find the numeric team ID using the Github API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise developer application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise developer application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. 
Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME`: The name of your GitHub Enterprise organization, as used in your organization's URL: https://github.com/<yourorg>/. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_CALLBACK_URL`: Create an organization-owned application at https://github.com/organizations/<yourorg>/settings/applications and obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this URL as the callback URL for your application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID`: Find the numeric team ID using the Github Enterprise API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_KEY`: The OAuth2 key (Client ID) from your Azure AD application. (string)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET`: The OAuth2 secret (Client Secret) from your Azure AD application. (string)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. 
Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_OIDC_KEY`: The OIDC key (Client ID) from your IDP. (string)\n* `SOCIAL_AUTH_OIDC_SECRET`: The OIDC secret (Client Secret) from your IDP. (string)\n* `SOCIAL_AUTH_OIDC_OIDC_ENDPOINT`: The URL for your OIDC provider including the path up to /.well-known/openid-configuration (string)\n* `SOCIAL_AUTH_OIDC_VERIFY_SSL`: Verify the OIDC provider ssl certificate. (boolean)\n* `SAML_AUTO_CREATE_OBJECTS`: When enabled (the default), mapped Organizations and Teams will be created automatically on successful SAML login. (boolean)\n* `SOCIAL_AUTH_SAML_CALLBACK_URL`: Register the service as a service provider (SP) with each identity provider (IdP) you have configured. Provide your SP Entity ID and this ACS URL for your application. (string)\n* `SOCIAL_AUTH_SAML_METADATA_URL`: If your identity provider (IdP) allows uploading an XML metadata file, you can download one from this URL. (string)\n* `SOCIAL_AUTH_SAML_SP_ENTITY_ID`: The application-defined unique identifier used as the audience of the SAML service provider (SP) configuration. This is usually the URL for the service. (string)\n* `SOCIAL_AUTH_SAML_SP_PUBLIC_CERT`: Create a keypair to use as a service provider (SP) and include the certificate content here. (string)\n* `SOCIAL_AUTH_SAML_SP_PRIVATE_KEY`: Create a keypair to use as a service provider (SP) and include the private key content here. (string)\n* `SOCIAL_AUTH_SAML_ORG_INFO`: Provide the URL, display name, and the name of your app. Refer to the documentation for example syntax. (nested object)\n* `SOCIAL_AUTH_SAML_TECHNICAL_CONTACT`: Provide the name and email address of the technical contact for your service provider. Refer to the documentation for example syntax. (nested object)\n* `SOCIAL_AUTH_SAML_SUPPORT_CONTACT`: Provide the name and email address of the support contact for your service provider. Refer to the documentation for example syntax. (nested object)\n* `SOCIAL_AUTH_SAML_ENABLED_IDPS`: Configure the Entity ID, SSO URL and certificate for each identity provider (IdP) in use. Multiple SAML IdPs are supported. Some IdPs may provide user data using attribute names that differ from the default OIDs. Attribute names may be overridden for each IdP. Refer to the Ansible documentation for additional details and syntax. (nested object)\n* `SOCIAL_AUTH_SAML_SECURITY_CONFIG`: A dict of key value pairs that are passed to the underlying python-saml security setting https://github.com/onelogin/python-saml#settings (nested object)\n* `SOCIAL_AUTH_SAML_SP_EXTRA`: A dict of key value pairs to be passed to the underlying python-saml Service Provider configuration setting. (nested object)\n* `SOCIAL_AUTH_SAML_EXTRA_DATA`: A list of tuples that maps IDP attributes to extra_attributes. Each attribute will be a list of values, even if only 1 value. (list)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_SAML_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. 
(nested object)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_ATTR`: Used to translate user organization membership. (nested object)\n* `SOCIAL_AUTH_SAML_TEAM_ATTR`: Used to translate user team membership. (nested object)\n* `SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR`: Used to map super users and system auditors from SAML. (nested object)\n* `LOCAL_PASSWORD_MIN_LENGTH`: Minimum number of characters required in a local password. 0 means no minimum (integer)\n* `LOCAL_PASSWORD_MIN_DIGITS`: Minimum number of digit characters required in a local password. 0 means no minimum (integer)\n* `LOCAL_PASSWORD_MIN_UPPER`: Minimum number of uppercase characters required in a local password. 0 means no minimum (integer)\n* `LOCAL_PASSWORD_MIN_SPECIAL`: Minimum number of special characters required in a local password. 0 means no minimum (integer)\n\n\n\n\n\n# Update a Setting:\n\nMake a PUT or PATCH request to this resource to update this\nsetting. The following fields may be modified:\n\n\n* `ACTIVITY_STREAM_ENABLED`: Enable capturing activity for the activity stream. (boolean, required)\n* `ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC`: Enable capturing activity for the activity stream when running inventory sync. (boolean, required)\n* `ORG_ADMINS_CAN_SEE_ALL_USERS`: Controls whether any Organization Admin can view all users and teams, even those not associated with their Organization. (boolean, required)\n* `MANAGE_ORGANIZATION_AUTH`: Controls whether any Organization Admin has the privileges to create and manage users and teams. You may want to disable this ability if you are using an LDAP or SAML integration. (boolean, required)\n* `TOWER_URL_BASE`: This setting is used by services like notifications to render a valid url to the service. (string, required)\n* `REMOTE_HOST_HEADERS`: HTTP headers and meta keys to search to determine remote host name or IP. Add additional items to this list, such as "HTTP_X_FORWARDED_FOR", if behind a reverse proxy. See the "Proxy Support" section of the AAP Installation guide for more details. (list, required)\n* `PROXY_IP_ALLOWED_LIST`: If the service is behind a reverse proxy/load balancer, use this setting to configure the proxy IP addresses from which the service should trust custom REMOTE_HOST_HEADERS header values. If this setting is an empty list (the default), the headers specified by REMOTE_HOST_HEADERS will be trusted unconditionally. (list, required)\n* `CSRF_TRUSTED_ORIGINS`: If the service is behind a reverse proxy/load balancer, use this setting to configure the schema://addresses from which the service should trust Origin header values. (list, default=`[]`)\n\n* `REDHAT_USERNAME`: This username is used to send data to Automation Analytics (string, default=`\"\"`)\n* `REDHAT_PASSWORD`: This password is used to send data to Automation Analytics (string, default=`\"\"`)\n* `SUBSCRIPTIONS_USERNAME`: This username is used to retrieve subscription and content information (string, default=`\"\"`)\n* `SUBSCRIPTIONS_PASSWORD`: This password is used to retrieve subscription and content information (string, default=`\"\"`)\n* `AUTOMATION_ANALYTICS_URL`: This setting is used to configure the upload URL for data collection for Automation Analytics. (string, default=`\"https://example.com\"`)\n\n\n\n* `DEFAULT_EXECUTION_ENVIRONMENT`: The Execution Environment to be used when one has not been configured for a job template. (field, default=`None`)\n* `CUSTOM_VENV_PATHS`: Paths where Tower will look for custom virtual environments (in addition to /var/lib/awx/venv/).
Enter one path per line. (list, default=`[]`)\n* `AD_HOC_COMMANDS`: List of modules allowed to be used by ad-hoc jobs. (list, default=`['command', 'shell', 'yum', 'apt', 'apt_key', 'apt_repository', 'apt_rpm', 'service', 'group', 'user', 'mount', 'ping', 'selinux', 'setup', 'win_ping', 'win_service', 'win_updates', 'win_group', 'win_user']`)\n* `ALLOW_JINJA_IN_EXTRA_VARS`: Ansible allows variable substitution via the Jinja2 templating language for --extra-vars. This poses a potential security risk where users with the ability to specify extra vars at job launch time can use Jinja2 templates to run arbitrary Python. It is recommended that this value be set to "template" or "never". (choice, required)\n - `always`: Always\n - `never`: Never\n - `template`: Only On Job Template Definitions (default)\n* `AWX_ISOLATION_BASE_PATH`: The directory in which the service will create new temporary directories for job execution and isolation (such as credential files). (string, required)\n* `AWX_ISOLATION_SHOW_PATHS`: List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. Volumes will be mounted from the execution node to the container. The supported format is HOST-DIR[:CONTAINER-DIR[:OPTIONS]]. (list, default=`[]`)\n* `AWX_TASK_ENV`: Additional environment variables set for playbook runs, inventory updates, project updates, and notification sending. (nested object, default=`{}`)\n* `AWX_RUNNER_KEEPALIVE_SECONDS`: Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open. (integer, required)\n* `GALAXY_TASK_ENV`: Additional environment variables set for invocations of ansible-galaxy within project updates. Useful if you must use a proxy server for ansible-galaxy but not git. (nested object, required)\n* `INSIGHTS_TRACKING_STATE`: Enables the service to gather data on automation and send it to Automation Analytics. (boolean, default=`False`)\n* `PROJECT_UPDATE_VVV`: Adds the CLI -vvv flag to ansible-playbook runs of project_update.yml used for project updates. (boolean, required)\n* `AWX_ROLES_ENABLED`: Allows roles to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean, default=`True`)\n* `AWX_COLLECTIONS_ENABLED`: Allows collections to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean, default=`True`)\n* `AWX_SHOW_PLAYBOOK_LINKS`: Follow symbolic links when scanning for playbooks. Be aware that setting this to True can lead to infinite recursion if a link points to a parent directory of itself. (boolean, default=`False`)\n* `AWX_MOUNT_ISOLATED_PATHS_ON_K8S`: Expose paths via hostPath for the Pods created by a Container Group. HostPath volumes present many security risks, and it is a best practice to avoid the use of HostPaths when possible. (boolean, default=`False`)\n* `GALAXY_IGNORE_CERTS`: If set to true, certificate validation will not be done when installing content from any Galaxy server. (boolean, default=`False`)\n* `STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display before requiring the output be downloaded. (integer, required)\n* `EVENT_STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display for a single job or ad hoc command event. `stdout` will end with `\u2026` when truncated. (integer, required)\n* `MAX_WEBSOCKET_EVENT_RATE`: Maximum number of messages to update the UI live job output with per second. Value of 0 means no limit. 
(integer, default=`30`)\n* `SCHEDULE_MAX_JOBS`: Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created. (integer, required)\n* `AWX_ANSIBLE_CALLBACK_PLUGINS`: List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line. (list, default=`[]`)\n* `DEFAULT_JOB_TIMEOUT`: Maximum time in seconds to allow jobs to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual job template will override this. (integer, default=`0`)\n* `DEFAULT_JOB_IDLE_TIMEOUT`: If no output is detected from ansible in this number of seconds the execution will be terminated. Use value of 0 to indicate that no idle timeout should be imposed. (integer, default=`0`)\n* `DEFAULT_INVENTORY_UPDATE_TIMEOUT`: Maximum time in seconds to allow inventory updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual inventory source will override this. (integer, default=`0`)\n* `DEFAULT_PROJECT_UPDATE_TIMEOUT`: Maximum time in seconds to allow project updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual project will override this. (integer, default=`0`)\n* `ANSIBLE_FACT_CACHE_TIMEOUT`: Maximum time, in seconds, that stored Ansible facts are considered valid since the last time they were modified. Only valid, non-stale, facts will be accessible by a playbook. Note, this does not influence the deletion of ansible_facts from the database. Use a value of 0 to indicate that no timeout should be imposed. (integer, default=`0`)\n* `MAX_FORKS`: Saving a Job Template with more than this number of forks will result in an error. When set to 0, no limit is applied. (integer, default=`200`)\n* `LOG_AGGREGATOR_HOST`: Hostname/IP where external logs will be sent to. (string, default=`\"\"`)\n* `LOG_AGGREGATOR_PORT`: Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator). (integer, default=`None`)\n* `LOG_AGGREGATOR_TYPE`: Format messages for the chosen log aggregator. (choice)\n - `None`: --------- (default)\n - `logstash`\n - `splunk`\n - `loggly`\n - `sumologic`\n - `other`\n* `LOG_AGGREGATOR_USERNAME`: Username for external log aggregator (if required; HTTP/s only). (string, default=`\"\"`)\n* `LOG_AGGREGATOR_PASSWORD`: Password or authentication token for external log aggregator (if required; HTTP/s only). (string, default=`\"\"`)\n* `LOG_AGGREGATOR_LOGGERS`: List of loggers that will send HTTP logs to the collector, these can include any or all of: \nawx - service logs\nactivity_stream - activity stream records\njob_events - callback data from Ansible job events\nsystem_tracking - facts gathered from scan jobs\nbroadcast_websocket - errors pertaining to websockets broadcast metrics\n (list, default=`['awx', 'activity_stream', 'job_events', 'system_tracking', 'broadcast_websocket']`)\n* `LOG_AGGREGATOR_INDIVIDUAL_FACTS`: If set, system tracking facts will be sent for each package, service, or other item found in a scan, allowing for greater search query granularity. If unset, facts will be sent as a single dictionary, allowing for greater efficiency in fact processing. (boolean, default=`False`)\n* `LOG_AGGREGATOR_ENABLED`: Enable sending logs to external log aggregator. (boolean, default=`False`)\n* `LOG_AGGREGATOR_TOWER_UUID`: Useful to uniquely identify instances. 
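For example, an arbitrary fixed value such as `a1b2c3d4-0000-0000-0000-000000000000` (a made-up example) makes it possible to tell this cluster's log entries apart in the aggregator. 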
(string, default=`\"\"`)\n* `LOG_AGGREGATOR_PROTOCOL`: Protocol used to communicate with log aggregator. HTTPS/HTTP assumes HTTPS unless http:// is explicitly used in the Logging Aggregator hostname. (choice)\n - `https`: HTTPS/HTTP (default)\n - `tcp`: TCP\n - `udp`: UDP\n* `LOG_AGGREGATOR_TCP_TIMEOUT`: Number of seconds for a TCP connection to external log aggregator to timeout. Applies to HTTPS and TCP log aggregator protocols. (integer, default=`5`)\n* `LOG_AGGREGATOR_VERIFY_CERT`: Flag to control enable/disable of certificate verification when LOG_AGGREGATOR_PROTOCOL is "https". If enabled, the log handler will verify certificate sent by external log aggregator before establishing connection. (boolean, default=`True`)\n* `LOG_AGGREGATOR_LEVEL`: Level threshold used by log handler. Severities from lowest to highest are DEBUG, INFO, WARNING, ERROR, CRITICAL. Messages less severe than the threshold will be ignored by log handler. (messages under category awx.anlytics ignore this setting) (choice)\n - `DEBUG`\n - `INFO` (default)\n - `WARNING`\n - `ERROR`\n - `CRITICAL`\n* `LOG_AGGREGATOR_ACTION_QUEUE_SIZE`: Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5). (integer, default=`131072`)\n* `LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB`: Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH. (integer, default=`1`)\n* `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH`: Location to persist logs that should be retried after an outage of the external log aggregator (defaults to /var/lib/awx). Equivalent to the rsyslogd queue.spoolDirectory setting. (string, default=`\"/var/lib/awx\"`)\n* `LOG_AGGREGATOR_RSYSLOGD_DEBUG`: Enabled high verbosity debugging for rsyslogd. Useful for debugging connection issues for external log aggregation. (boolean, default=`False`)\n* `API_400_ERROR_LOG_FORMAT`: The format of logged messages when an API 4XX error occurs, the following variables will be substituted: \nstatus_code - The HTTP status code of the error\nuser_name - The user name attempting to use the API\nurl_path - The URL path to the API endpoint called\nremote_addr - The remote address seen for the user\nerror - The error set by the api endpoint\nVariables need to be in the format {<variable name>}. (string, default=`\"status {status_code} received by user {user_name} attempting to access {url_path} from {remote_addr}\"`)\n* `AUTOMATION_ANALYTICS_LAST_GATHER`: (datetime, required)\n* `AUTOMATION_ANALYTICS_LAST_ENTRIES`: (string, default=`\"\"`)\n* `AUTOMATION_ANALYTICS_GATHER_INTERVAL`: Interval (in seconds) between data gathering. (integer, default=`14400`)\n\n* `BULK_JOB_MAX_LAUNCH`: Max jobs to allow bulk jobs to launch (integer, default=`100`)\n* `BULK_HOST_MAX_CREATE`: Max number of hosts to allow to be created in a single bulk action (integer, default=`100`)\n* `UI_NEXT`: Enable preview of new user interface. (boolean, default=`True`)\n* `SUBSCRIPTION_USAGE_MODEL`: (choice)\n - `\"\"`: Default model for AWX - no subscription. 
Deletion of host_metrics will not be considered for purposes of managed host counting (default)\n - `unique_managed_hosts`: Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes\n* `CLEANUP_HOST_METRICS_LAST_TS`: (datetime, required)\n* `HOST_METRIC_SUMMARY_TASK_LAST_TS`: (datetime, required)\n* `AWX_CLEANUP_PATHS`: Enable or disable cleanup of temporary directories (boolean, default=`True`)\n* `AWX_REQUEST_PROFILE`: Debug Python timing of web requests (boolean, default=`False`)\n* `DEFAULT_CONTAINER_RUN_OPTIONS`: List of options to pass to podman run, for example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug'] (list, default=`['--network', 'slirp4netns:enable_ipv6=true']`)\n* `RECEPTOR_RELEASE_WORK`: Release receptor work (boolean, default=`True`)\n* `SESSION_COOKIE_AGE`: Number of seconds that a user is inactive before they will need to login again. (integer, required)\n* `SESSIONS_PER_USER`: Maximum number of simultaneous logged-in sessions a user may have. To disable, enter -1. (integer, required)\n* `DISABLE_LOCAL_AUTH`: Controls whether users are prevented from using the built-in authentication system. You probably want to do this if you are using an LDAP or SAML integration. (boolean, required)\n* `AUTH_BASIC_ENABLED`: Enable HTTP Basic Auth for the API Browser. (boolean, required)\n* `OAUTH2_PROVIDER`: Dictionary for customizing OAuth 2 timeouts, available items are `ACCESS_TOKEN_EXPIRE_SECONDS`, the duration of access tokens in seconds, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, the duration of authorization codes in seconds, and `REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after access tokens have expired, in seconds. (nested object, default=`{'ACCESS_TOKEN_EXPIRE_SECONDS': 31536000000, 'AUTHORIZATION_CODE_EXPIRE_SECONDS': 600, 'REFRESH_TOKEN_EXPIRE_SECONDS': 2628000}`)\n* `ALLOW_OAUTH2_FOR_EXTERNAL_USERS`: For security reasons, users from external auth providers (LDAP, SAML, SSO, RADIUS, and others) are not allowed to create OAuth2 tokens. To change this behavior, enable this setting. Existing tokens will not be deleted when this setting is toggled off. (boolean, default=`False`)\n* `LOGIN_REDIRECT_OVERRIDE`: URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page. (string, default=`\"\"`)\n* `ALLOW_METRICS_FOR_ANONYMOUS_USERS`: If true, anonymous users are allowed to poll metrics. (boolean, default=`False`)\n\n* `CUSTOM_LOGIN_INFO`: If needed, you can add specific information (such as a legal notice or a disclaimer) to a text box in the login modal using this setting. Any content added must be in plain text or an HTML fragment, as other markup languages are not supported. (string, default=`\"\"`)\n* `CUSTOM_LOGO`: To set up a custom logo, provide a file that you create. For the custom logo to look its best, use a .png file with a transparent background. GIF, PNG, and JPEG formats are supported. (string, default=`\"\"`)\n* `MAX_UI_JOB_EVENTS`: Maximum number of job events for the UI to retrieve within a single request. (integer, required)\n* `UI_LIVE_UPDATES_ENABLED`: If disabled, the page will not refresh when events are received. Reloading the page will be required to get the latest details. (boolean, required)\n\n* `SOCIAL_AUTH_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. 
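(A minimal sketch, using hypothetical names: `{'My Org': {'admins': 'admin@example.com', 'users': True}}`.) 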
This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_USER_FIELDS`: When set to an empty list `[]`, this setting prevents new user accounts from being created. Only users who have previously logged in using social auth or have a user account with a matching email address will be able to login. (list, default=`None`)\n* `SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL`: Enabling this setting will tell social auth to use the full Email as username instead of the full name (boolean, default=`False`)\n* `AUTH_LDAP_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_1_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_1_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_1_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_1_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_1_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_1_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_1_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. 
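For example, `uid=%(user)s,ou=users,dc=example,dc=com` (a hypothetical directory layout) substitutes the login name for `%(user)s`. 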
This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_1_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_1_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_1_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_1_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_1_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_1_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_1_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_1_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_1_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_2_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_2_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_2_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_2_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. 
(boolean, default=`False`)\n* `AUTH_LDAP_2_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_2_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_2_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_2_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_2_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_2_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_2_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_2_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_2_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_2_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_2_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_2_TEAM_MAP`: Mapping between team members (users) and LDAP groups. 
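A minimal sketch, using hypothetical names: `{'My Team': {'organization': 'My Org', 'users': 'cn=my-team,ou=groups,dc=example,dc=com', 'remove': True}}`. 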
Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_3_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_3_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_3_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_3_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_3_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_3_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_3_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_3_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_3_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_3_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_3_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_3_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. 
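For example, a full group DN such as `cn=awx-users,ou=groups,dc=example,dc=com` (a hypothetical group) restricts login to members of that group. 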
Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_3_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_3_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_3_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_3_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_4_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_4_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_4_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_4_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_4_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_4_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_4_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_4_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_4_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_4_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_4_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_4_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_4_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_4_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_4_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_4_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_5_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_5_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_5_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_5_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_5_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_5_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_5_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. 
This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_5_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_5_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_5_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_5_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_5_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_5_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_5_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_5_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_5_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `RADIUS_SERVER`: Hostname/IP of RADIUS server. RADIUS authentication is disabled if this setting is empty. (string, default=`\"\"`)\n* `RADIUS_PORT`: Port of RADIUS server. (integer, default=`1812`)\n* `RADIUS_SECRET`: Shared secret for authenticating to RADIUS server. (string, default=`\"\"`)\n* `TACACSPLUS_HOST`: Hostname of TACACS+ server. (string, default=`\"\"`)\n* `TACACSPLUS_PORT`: Port number of TACACS+ server. (integer, default=`49`)\n* `TACACSPLUS_SECRET`: Shared secret for authenticating to TACACS+ server. (string, default=`\"\"`)\n* `TACACSPLUS_SESSION_TIMEOUT`: TACACS+ session timeout value in seconds, 0 disables timeout. (integer, default=`5`)\n* `TACACSPLUS_AUTH_PROTOCOL`: Choose the authentication protocol used by TACACS+ client. 
(choice)\n - `ascii` (default)\n - `pap`\n* `TACACSPLUS_REM_ADDR`: Enable sending of the client address by the TACACS+ client. (boolean, default=`False`)\n\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_KEY`: The OAuth2 key from your web application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET`: The OAuth2 secret from your web application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS`: Update this setting to restrict the domains that are allowed to login using Google OAuth2. (list, default=`[]`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS`: Extra arguments for Google OAuth2 login. You can restrict it to only allow a single domain to authenticate, even if the user is logged in with multiple Google accounts. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_KEY`: The OAuth2 key (Client ID) from your GitHub developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_SECRET`: The OAuth2 secret (Client Secret) from your GitHub developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORG_NAME`: The name of your GitHub organization, as used in your organization's URL: https://github.com/<yourorg>/. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_TEAM_ID`: Find the numeric team ID using the Github API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. 
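As a sketch, assuming a hypothetical organization `myorg` and team slug `myteam`, the ID can also be read from the GitHub REST API: `requests.get('https://api.github.com/orgs/myorg/teams/myteam', headers={'Authorization': 'Bearer <token>'}).json()['id']`. 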
(string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME`: The name of your GitHub Enterprise organization, as used in your organization's URL: https://github.com/<yourorg>/. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. 
Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID`: Find the numeric team ID using the Github Enterprise API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_KEY`: The OAuth2 key (Client ID) from your Azure AD application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET`: The OAuth2 secret (Client Secret) from your Azure AD application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_OIDC_KEY`: The OIDC key (Client ID) from your IDP. (string, default=`\"\"`)\n* `SOCIAL_AUTH_OIDC_SECRET`: The OIDC secret (Client Secret) from your IDP. (string, default=`\"\"`)\n* `SOCIAL_AUTH_OIDC_OIDC_ENDPOINT`: The URL for your OIDC provider including the path up to /.well-known/openid-configuration (string, default=`\"\"`)\n* `SOCIAL_AUTH_OIDC_VERIFY_SSL`: Verify the OIDC provider ssl certificate. (boolean, default=`True`)\n* `SAML_AUTO_CREATE_OBJECTS`: When enabled (the default), mapped Organizations and Teams will be created automatically on successful SAML login. (boolean, default=`True`)\n\n\n* `SOCIAL_AUTH_SAML_SP_ENTITY_ID`: The application-defined unique identifier used as the audience of the SAML service provider (SP) configuration. This is usually the URL for the service. (string, default=`\"\"`)\n* `SOCIAL_AUTH_SAML_SP_PUBLIC_CERT`: Create a keypair to use as a service provider (SP) and include the certificate content here. (string, required)\n* `SOCIAL_AUTH_SAML_SP_PRIVATE_KEY`: Create a keypair to use as a service provider (SP) and include the private key content here. (string, required)\n* `SOCIAL_AUTH_SAML_ORG_INFO`: Provide the URL, display name, and the name of your app. Refer to the documentation for example syntax. (nested object, required)\n* `SOCIAL_AUTH_SAML_TECHNICAL_CONTACT`: Provide the name and email address of the technical contact for your service provider. Refer to the documentation for example syntax. (nested object, required)\n* `SOCIAL_AUTH_SAML_SUPPORT_CONTACT`: Provide the name and email address of the support contact for your service provider. 
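A minimal sketch, assuming the python-saml contact format: `{'givenName': 'Support Contact', 'emailAddress': 'support@example.com'}`. 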
Refer to the documentation for example syntax. (nested object, required)\n* `SOCIAL_AUTH_SAML_ENABLED_IDPS`: Configure the Entity ID, SSO URL and certificate for each identity provider (IdP) in use. Multiple SAML IdPs are supported. Some IdPs may provide user data using attribute names that differ from the default OIDs. Attribute names may be overridden for each IdP. Refer to the Ansible documentation for additional details and syntax. (nested object, default=`{}`)\n* `SOCIAL_AUTH_SAML_SECURITY_CONFIG`: A dict of key value pairs that are passed to the underlying python-saml security setting https://github.com/onelogin/python-saml#settings (nested object, default=`{'requestedAuthnContext': False}`)\n* `SOCIAL_AUTH_SAML_SP_EXTRA`: A dict of key value pairs to be passed to the underlying python-saml Service Provider configuration setting. (nested object, default=`None`)\n* `SOCIAL_AUTH_SAML_EXTRA_DATA`: A list of tuples that maps IDP attributes to extra_attributes. Each attribute will be a list of values, even if only 1 value. (list, default=`None`)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_SAML_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_ATTR`: Used to translate user organization membership. (nested object, default=`{}`)\n* `SOCIAL_AUTH_SAML_TEAM_ATTR`: Used to translate user team membership. (nested object, default=`{}`)\n* `SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR`: Used to map super users and system auditors from SAML. (nested object, default=`{}`)\n* `LOCAL_PASSWORD_MIN_LENGTH`: Minimum number of characters required in a local password. 0 means no minimum (integer, default=`0`)\n* `LOCAL_PASSWORD_MIN_DIGITS`: Minimum number of digit characters required in a local password. 0 means no minimum (integer, default=`0`)\n* `LOCAL_PASSWORD_MIN_UPPER`: Minimum number of uppercase characters required in a local password. 0 means no minimum (integer, default=`0`)\n* `LOCAL_PASSWORD_MIN_SPECIAL`: Minimum number of special characters required in a local password. 
0 means no minimum (integer, default=`0`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Setting:\n\nMake a DELETE request to this resource to delete this setting.", + "operationId": "api_settings_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL": "example.com", + "SOCIAL_AUTH_GITHUB_ENTERPRISE_URL": "example.com" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL": "example.com", + "SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL": "https://towerhost/sso/complete/github-enterprise/", + "SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY": "", + "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP": null, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET": "", + "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP": null, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_URL": "example.com" + } + }, + "schema": { + "$ref": "#/definitions/SettingSingleton" + } + }, + "400": { + "examples": { + "application/json": { + "CUSTOM_LOGO": [ + "Invalid base64-encoded data in data URL." + ] + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single setting", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `ACTIVITY_STREAM_ENABLED`: Enable capturing activity for the activity stream. (boolean)\n* `ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC`: Enable capturing activity for the activity stream when running inventory sync. (boolean)\n* `ORG_ADMINS_CAN_SEE_ALL_USERS`: Controls whether any Organization Admin can view all users and teams, even those not associated with their Organization. (boolean)\n* `MANAGE_ORGANIZATION_AUTH`: Controls whether any Organization Admin has the privileges to create and manage users and teams. You may want to disable this ability if you are using an LDAP or SAML integration. (boolean)\n* `TOWER_URL_BASE`: This setting is used by services like notifications to render a valid URL to the service. (string)\n* `REMOTE_HOST_HEADERS`: HTTP headers and meta keys to search to determine remote host name or IP. Add additional items to this list, such as "HTTP_X_FORWARDED_FOR", if behind a reverse proxy. See the "Proxy Support" section of the AAP Installation guide for more details. (list)\n* `PROXY_IP_ALLOWED_LIST`: If the service is behind a reverse proxy/load balancer, use this setting to configure the proxy IP addresses from which the service should trust custom REMOTE_HOST_HEADERS header values. If this setting is an empty list (the default), the headers specified by REMOTE_HOST_HEADERS will be trusted unconditionally. (list)\n* `CSRF_TRUSTED_ORIGINS`: If the service is behind a reverse proxy/load balancer, use this setting to configure the schema://addresses from which the service should trust Origin header values. (list)\n* `LICENSE`: The license controls which features and functionality are enabled. Use /api/v2/config/ to update or change the license. 
(nested object)\n* `REDHAT_USERNAME`: This username is used to send data to Automation Analytics (string)\n* `REDHAT_PASSWORD`: This password is used to send data to Automation Analytics (string)\n* `SUBSCRIPTIONS_USERNAME`: This username is used to retrieve subscription and content information (string)\n* `SUBSCRIPTIONS_PASSWORD`: This password is used to retrieve subscription and content information (string)\n* `AUTOMATION_ANALYTICS_URL`: This setting is used to configure the upload URL for data collection for Automation Analytics. (string)\n* `INSTALL_UUID`: (string)\n* `DEFAULT_CONTROL_PLANE_QUEUE_NAME`: (string)\n* `DEFAULT_EXECUTION_QUEUE_NAME`: (string)\n* `DEFAULT_EXECUTION_ENVIRONMENT`: The Execution Environment to be used when one has not been configured for a job template. (field)\n* `CUSTOM_VENV_PATHS`: Paths where Tower will look for custom virtual environments (in addition to /var/lib/awx/venv/). Enter one path per line. (list)\n* `AD_HOC_COMMANDS`: List of modules allowed to be used by ad-hoc jobs. (list)\n* `ALLOW_JINJA_IN_EXTRA_VARS`: Ansible allows variable substitution via the Jinja2 templating language for --extra-vars. This poses a potential security risk where users with the ability to specify extra vars at job launch time can use Jinja2 templates to run arbitrary Python. It is recommended that this value be set to "template" or "never". (choice)\n - `always`: Always\n - `never`: Never\n - `template`: Only On Job Template Definitions\n* `AWX_ISOLATION_BASE_PATH`: The directory in which the service will create new temporary directories for job execution and isolation (such as credential files). (string)\n* `AWX_ISOLATION_SHOW_PATHS`: List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. Volumes will be mounted from the execution node to the container. The supported format is HOST-DIR[:CONTAINER-DIR[:OPTIONS]]. (list)\n* `AWX_TASK_ENV`: Additional environment variables set for playbook runs, inventory updates, project updates, and notification sending. (nested object)\n* `AWX_RUNNER_KEEPALIVE_SECONDS`: Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open. (integer)\n* `GALAXY_TASK_ENV`: Additional environment variables set for invocations of ansible-galaxy within project updates. Useful if you must use a proxy server for ansible-galaxy but not git. (nested object)\n* `INSIGHTS_TRACKING_STATE`: Enables the service to gather data on automation and send it to Automation Analytics. (boolean)\n* `PROJECT_UPDATE_VVV`: Adds the CLI -vvv flag to ansible-playbook runs of project_update.yml used for project updates. (boolean)\n* `AWX_ROLES_ENABLED`: Allows roles to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean)\n* `AWX_COLLECTIONS_ENABLED`: Allows collections to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean)\n* `AWX_SHOW_PLAYBOOK_LINKS`: Follow symbolic links when scanning for playbooks. Be aware that setting this to True can lead to infinite recursion if a link points to a parent directory of itself. (boolean)\n* `AWX_MOUNT_ISOLATED_PATHS_ON_K8S`: Expose paths via hostPath for the Pods created by a Container Group. HostPath volumes present many security risks, and it is a best practice to avoid the use of HostPaths when possible. (boolean)\n* `GALAXY_IGNORE_CERTS`: If set to true, certificate validation will not be done when installing content from any Galaxy server. 
(boolean)\n* `STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display before requiring the output be downloaded. (integer)\n* `EVENT_STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display for a single job or ad hoc command event. `stdout` will end with `\u2026` when truncated. (integer)\n* `MAX_WEBSOCKET_EVENT_RATE`: Maximum number of messages to update the UI live job output with per second. Value of 0 means no limit. (integer)\n* `SCHEDULE_MAX_JOBS`: Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created. (integer)\n* `AWX_ANSIBLE_CALLBACK_PLUGINS`: List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line. (list)\n* `DEFAULT_JOB_TIMEOUT`: Maximum time in seconds to allow jobs to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual job template will override this. (integer)\n* `DEFAULT_JOB_IDLE_TIMEOUT`: If no output is detected from ansible in this number of seconds the execution will be terminated. Use value of 0 to indicate that no idle timeout should be imposed. (integer)\n* `DEFAULT_INVENTORY_UPDATE_TIMEOUT`: Maximum time in seconds to allow inventory updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual inventory source will override this. (integer)\n* `DEFAULT_PROJECT_UPDATE_TIMEOUT`: Maximum time in seconds to allow project updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual project will override this. (integer)\n* `ANSIBLE_FACT_CACHE_TIMEOUT`: Maximum time, in seconds, that stored Ansible facts are considered valid since the last time they were modified. Only valid, non-stale, facts will be accessible by a playbook. Note, this does not influence the deletion of ansible_facts from the database. Use a value of 0 to indicate that no timeout should be imposed. (integer)\n* `MAX_FORKS`: Saving a Job Template with more than this number of forks will result in an error. When set to 0, no limit is applied. (integer)\n* `LOG_AGGREGATOR_HOST`: Hostname/IP where external logs will be sent to. (string)\n* `LOG_AGGREGATOR_PORT`: Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator). (integer)\n* `LOG_AGGREGATOR_TYPE`: Format messages for the chosen log aggregator. (choice)\n - `None`: ---------\n - `logstash`\n - `splunk`\n - `loggly`\n - `sumologic`\n - `other`\n* `LOG_AGGREGATOR_USERNAME`: Username for external log aggregator (if required; HTTP/s only). (string)\n* `LOG_AGGREGATOR_PASSWORD`: Password or authentication token for external log aggregator (if required; HTTP/s only). (string)\n* `LOG_AGGREGATOR_LOGGERS`: List of loggers that will send HTTP logs to the collector, these can include any or all of: \nawx - service logs\nactivity_stream - activity stream records\njob_events - callback data from Ansible job events\nsystem_tracking - facts gathered from scan jobs\nbroadcast_websocket - errors pertaining to websockets broadcast metrics\n (list)\n* `LOG_AGGREGATOR_INDIVIDUAL_FACTS`: If set, system tracking facts will be sent for each package, service, or other item found in a scan, allowing for greater search query granularity. If unset, facts will be sent as a single dictionary, allowing for greater efficiency in fact processing. (boolean)\n* `LOG_AGGREGATOR_ENABLED`: Enable sending logs to external log aggregator. 
(boolean)\n* `LOG_AGGREGATOR_TOWER_UUID`: Useful to uniquely identify instances. (string)\n* `LOG_AGGREGATOR_PROTOCOL`: Protocol used to communicate with log aggregator. HTTPS/HTTP assumes HTTPS unless http:// is explicitly used in the Logging Aggregator hostname. (choice)\n - `https`: HTTPS/HTTP\n - `tcp`: TCP\n - `udp`: UDP\n* `LOG_AGGREGATOR_TCP_TIMEOUT`: Number of seconds for a TCP connection to external log aggregator to timeout. Applies to HTTPS and TCP log aggregator protocols. (integer)\n* `LOG_AGGREGATOR_VERIFY_CERT`: Flag to control enable/disable of certificate verification when LOG_AGGREGATOR_PROTOCOL is "https". If enabled, the log handler will verify certificate sent by external log aggregator before establishing connection. (boolean)\n* `LOG_AGGREGATOR_LEVEL`: Level threshold used by log handler. Severities from lowest to highest are DEBUG, INFO, WARNING, ERROR, CRITICAL. Messages less severe than the threshold will be ignored by log handler. (messages under category awx.analytics ignore this setting) (choice)\n - `DEBUG`\n - `INFO`\n - `WARNING`\n - `ERROR`\n - `CRITICAL`\n* `LOG_AGGREGATOR_ACTION_QUEUE_SIZE`: Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5). (integer)\n* `LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB`: Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH. (integer)\n* `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH`: Location to persist logs that should be retried after an outage of the external log aggregator (defaults to /var/lib/awx). Equivalent to the rsyslogd queue.spoolDirectory setting. (string)\n* `LOG_AGGREGATOR_RSYSLOGD_DEBUG`: Enables high verbosity debugging for rsyslogd. Useful for debugging connection issues for external log aggregation. (boolean)\n* `API_400_ERROR_LOG_FORMAT`: The format of logged messages when an API 4XX error occurs, the following variables will be substituted: \nstatus_code - The HTTP status code of the error\nuser_name - The user name attempting to use the API\nurl_path - The URL path to the API endpoint called\nremote_addr - The remote address seen for the user\nerror - The error set by the api endpoint\nVariables need to be in the format {<variable name>}. (string)\n* `AUTOMATION_ANALYTICS_LAST_GATHER`: (datetime)\n* `AUTOMATION_ANALYTICS_LAST_ENTRIES`: (string)\n* `AUTOMATION_ANALYTICS_GATHER_INTERVAL`: Interval (in seconds) between data gathering. (integer)\n* `IS_K8S`: Indicates whether the instance is part of a kubernetes-based deployment. (boolean)\n* `BULK_JOB_MAX_LAUNCH`: Max jobs to allow bulk jobs to launch (integer)\n* `BULK_HOST_MAX_CREATE`: Max number of hosts to allow to be created in a single bulk action (integer)\n* `UI_NEXT`: Enable preview of new user interface. (boolean)\n* `SUBSCRIPTION_USAGE_MODEL`: (choice)\n - `\"\"`: Default model for AWX - no subscription.
Deletion of host_metrics will not be considered for purposes of managed host counting\n - `unique_managed_hosts`: Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes\n* `CLEANUP_HOST_METRICS_LAST_TS`: (datetime)\n* `HOST_METRIC_SUMMARY_TASK_LAST_TS`: (datetime)\n* `AWX_CLEANUP_PATHS`: Enable or Disable TMP Dir cleanup (boolean)\n* `AWX_REQUEST_PROFILE`: Debug web request python timing (boolean)\n* `DEFAULT_CONTAINER_RUN_OPTIONS`: List of options to pass to podman run example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug'] (list)\n* `RECEPTOR_RELEASE_WORK`: Release receptor work (boolean)\n* `SESSION_COOKIE_AGE`: Number of seconds that a user is inactive before they will need to login again. (integer)\n* `SESSIONS_PER_USER`: Maximum number of simultaneous logged in sessions a user may have. To disable enter -1. (integer)\n* `DISABLE_LOCAL_AUTH`: Controls whether users are prevented from using the built-in authentication system. You probably want to do this if you are using an LDAP or SAML integration. (boolean)\n* `AUTH_BASIC_ENABLED`: Enable HTTP Basic Auth for the API Browser. (boolean)\n* `OAUTH2_PROVIDER`: Dictionary for customizing OAuth 2 timeouts, available items are `ACCESS_TOKEN_EXPIRE_SECONDS`, the duration of access tokens in the number of seconds, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, the duration of authorization codes in the number of seconds, and `REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after expired access tokens, in the number of seconds. (nested object)\n* `ALLOW_OAUTH2_FOR_EXTERNAL_USERS`: For security reasons, users from external auth providers (LDAP, SAML, SSO, Radius, and others) are not allowed to create OAuth2 tokens. To change this behavior, enable this setting. Existing tokens will not be deleted when this setting is toggled off. (boolean)\n* `LOGIN_REDIRECT_OVERRIDE`: URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page. (string)\n* `ALLOW_METRICS_FOR_ANONYMOUS_USERS`: If true, anonymous users are allowed to poll metrics. (boolean)\n* `PENDO_TRACKING_STATE`: Enable or Disable User Analytics Tracking. (choice)\n - `off`: Off\n - `anonymous`: Anonymous\n - `detailed`: Detailed\n* `CUSTOM_LOGIN_INFO`: If needed, you can add specific information (such as a legal notice or a disclaimer) to a text box in the login modal using this setting. Any content added must be in plain text or an HTML fragment, as other markup languages are not supported. (string)\n* `CUSTOM_LOGO`: To set up a custom logo, provide a file that you create. For the custom logo to look its best, use a .png file with a transparent background. GIF, PNG and JPEG formats are supported. (string)\n* `MAX_UI_JOB_EVENTS`: Maximum number of job events for the UI to retrieve within a single request. (integer)\n* `UI_LIVE_UPDATES_ENABLED`: If disabled, the page will not refresh when events are received. Reloading the page will be required to get the latest details. (boolean)\n* `AUTHENTICATION_BACKENDS`: List of authentication backends that are enabled based on license features and other authentication settings. (list)\n* `SOCIAL_AUTH_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. 
(nested object)\n* `SOCIAL_AUTH_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_USER_FIELDS`: When set to an empty list `[]`, this setting prevents new user accounts from being created. Only users who have previously logged in using social auth or have a user account with a matching email address will be able to login. (list)\n* `SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL`: Enabling this setting will tell social auth to use the full Email as username instead of the full name (boolean)\n* `AUTH_LDAP_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. 
(nested object)\n* `AUTH_LDAP_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_1_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_1_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_1_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_1_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_1_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_1_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_1_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_1_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_1_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_1_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_1_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_1_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_1_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_1_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_1_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_1_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_2_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_2_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_2_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_2_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_2_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_2_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_2_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_2_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. 
The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_2_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_2_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_2_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_2_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_2_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_2_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_2_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_2_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_3_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_3_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_3_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_3_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_3_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_3_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). 
If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_3_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_3_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_3_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_3_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_3_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_3_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_3_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_3_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_3_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_3_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_4_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string)\n* `AUTH_LDAP_4_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_4_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_4_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_4_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. 
LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_4_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_4_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_4_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_4_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_4_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_4_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_4_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_4_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_4_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_4_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_4_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_5_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. 
(string)\n* `AUTH_LDAP_5_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string)\n* `AUTH_LDAP_5_BIND_PASSWORD`: Password used to bind LDAP user account. (string)\n* `AUTH_LDAP_5_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean)\n* `AUTH_LDAP_5_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object)\n* `AUTH_LDAP_5_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list)\n* `AUTH_LDAP_5_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string)\n* `AUTH_LDAP_5_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object)\n* `AUTH_LDAP_5_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list)\n* `AUTH_LDAP_5_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType`\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_5_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object)\n* `AUTH_LDAP_5_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string)\n* `AUTH_LDAP_5_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string)\n* `AUTH_LDAP_5_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object)\n* `AUTH_LDAP_5_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. 
Configuration details are available in the documentation. (nested object)\n* `AUTH_LDAP_5_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object)\n* `RADIUS_SERVER`: Hostname/IP of RADIUS server. RADIUS authentication is disabled if this setting is empty. (string)\n* `RADIUS_PORT`: Port of RADIUS server. (integer)\n* `RADIUS_SECRET`: Shared secret for authenticating to RADIUS server. (string)\n* `TACACSPLUS_HOST`: Hostname of TACACS+ server. (string)\n* `TACACSPLUS_PORT`: Port number of TACACS+ server. (integer)\n* `TACACSPLUS_SECRET`: Shared secret for authenticating to TACACS+ server. (string)\n* `TACACSPLUS_SESSION_TIMEOUT`: TACACS+ session timeout value in seconds, 0 disables timeout. (integer)\n* `TACACSPLUS_AUTH_PROTOCOL`: Choose the authentication protocol used by TACACS+ client. (choice)\n - `ascii`\n - `pap`\n* `TACACSPLUS_REM_ADDR`: Enable the client address sending by TACACS+ client. (boolean)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_KEY`: The OAuth2 key from your web application. (string)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET`: The OAuth2 secret from your web application. (string)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS`: Update this setting to restrict the domains that are allowed to login using Google OAuth2. (list)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS`: Extra arguments for Google OAuth2 login. You can restrict it to only allow a single domain to authenticate, even if the user is logged in with multiple Google accounts. Refer to the documentation for more detail. (nested object)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_KEY`: The OAuth2 key (Client ID) from your GitHub developer application. (string)\n* `SOCIAL_AUTH_GITHUB_SECRET`: The OAuth2 secret (Client Secret) from your GitHub developer application. (string)\n* `SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ORG_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub organization application.
(string)\n* `SOCIAL_AUTH_GITHUB_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_NAME`: The name of your GitHub organization, as used in your organization's URL: https://github.com/<yourorg>/. (string)\n* `SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_TEAM_CALLBACK_URL`: Create an organization-owned application at https://github.com/organizations/<yourorg>/settings/applications and obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this URL as the callback URL for your application. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_ID`: Find the numeric team ID using the Github API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string)\n* `SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise developer application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise developer application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. 
Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME`: The name of your GitHub Enterprise organization, as used in your organization's URL: https://github.com/<yourorg>/. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_CALLBACK_URL`: Create an organization-owned application at https://github.com/organizations/<yourorg>/settings/applications and obtain an OAuth2 key (Client ID) and secret (Client Secret). Provide this URL as the callback URL for your application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID`: Find the numeric team ID using the Github Enterprise API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_CALLBACK_URL`: Provide this URL as the callback URL for your application as part of your registration process. Refer to the documentation for more detail. (string)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_KEY`: The OAuth2 key (Client ID) from your Azure AD application. (string)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET`: The OAuth2 secret (Client Secret) from your Azure AD application. (string)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. 
Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object)\n* `SOCIAL_AUTH_OIDC_KEY`: The OIDC key (Client ID) from your IDP. (string)\n* `SOCIAL_AUTH_OIDC_SECRET`: The OIDC secret (Client Secret) from your IDP. (string)\n* `SOCIAL_AUTH_OIDC_OIDC_ENDPOINT`: The URL for your OIDC provider including the path up to /.well-known/openid-configuration (string)\n* `SOCIAL_AUTH_OIDC_VERIFY_SSL`: Verify the OIDC provider ssl certificate. (boolean)\n* `SAML_AUTO_CREATE_OBJECTS`: When enabled (the default), mapped Organizations and Teams will be created automatically on successful SAML login. (boolean)\n* `SOCIAL_AUTH_SAML_CALLBACK_URL`: Register the service as a service provider (SP) with each identity provider (IdP) you have configured. Provide your SP Entity ID and this ACS URL for your application. (string)\n* `SOCIAL_AUTH_SAML_METADATA_URL`: If your identity provider (IdP) allows uploading an XML metadata file, you can download one from this URL. (string)\n* `SOCIAL_AUTH_SAML_SP_ENTITY_ID`: The application-defined unique identifier used as the audience of the SAML service provider (SP) configuration. This is usually the URL for the service. (string)\n* `SOCIAL_AUTH_SAML_SP_PUBLIC_CERT`: Create a keypair to use as a service provider (SP) and include the certificate content here. (string)\n* `SOCIAL_AUTH_SAML_SP_PRIVATE_KEY`: Create a keypair to use as a service provider (SP) and include the private key content here. (string)\n* `SOCIAL_AUTH_SAML_ORG_INFO`: Provide the URL, display name, and the name of your app. Refer to the documentation for example syntax. (nested object)\n* `SOCIAL_AUTH_SAML_TECHNICAL_CONTACT`: Provide the name and email address of the technical contact for your service provider. Refer to the documentation for example syntax. (nested object)\n* `SOCIAL_AUTH_SAML_SUPPORT_CONTACT`: Provide the name and email address of the support contact for your service provider. Refer to the documentation for example syntax. (nested object)\n* `SOCIAL_AUTH_SAML_ENABLED_IDPS`: Configure the Entity ID, SSO URL and certificate for each identity provider (IdP) in use. Multiple SAML IdPs are supported. Some IdPs may provide user data using attribute names that differ from the default OIDs. Attribute names may be overridden for each IdP. Refer to the Ansible documentation for additional details and syntax. (nested object)\n* `SOCIAL_AUTH_SAML_SECURITY_CONFIG`: A dict of key value pairs that are passed to the underlying python-saml security setting https://github.com/onelogin/python-saml#settings (nested object)\n* `SOCIAL_AUTH_SAML_SP_EXTRA`: A dict of key value pairs to be passed to the underlying python-saml Service Provider configuration setting. (nested object)\n* `SOCIAL_AUTH_SAML_EXTRA_DATA`: A list of tuples that maps IDP attributes to extra_attributes. Each attribute will be a list of values, even if only 1 value. (list)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object)\n* `SOCIAL_AUTH_SAML_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. 
(nested object)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_ATTR`: Used to translate user organization membership. (nested object)\n* `SOCIAL_AUTH_SAML_TEAM_ATTR`: Used to translate user team membership. (nested object)\n* `SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR`: Used to map super users and system auditors from SAML. (nested object)\n* `LOCAL_PASSWORD_MIN_LENGTH`: Minimum number of characters required in a local password. 0 means no minimum (integer)\n* `LOCAL_PASSWORD_MIN_DIGITS`: Minimum number of digit characters required in a local password. 0 means no minimum (integer)\n* `LOCAL_PASSWORD_MIN_UPPER`: Minimum number of uppercase characters required in a local password. 0 means no minimum (integer)\n* `LOCAL_PASSWORD_MIN_SPECIAL`: Minimum number of special characters required in a local password. 0 means no minimum (integer)\n\n\n\n\n\n# Update a Setting:\n\nMake a PUT or PATCH request to this resource to update this\nsetting. The following fields may be modified:\n\n\n* `ACTIVITY_STREAM_ENABLED`: Enable capturing activity for the activity stream. (boolean, required)\n* `ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC`: Enable capturing activity for the activity stream when running inventory sync. (boolean, required)\n* `ORG_ADMINS_CAN_SEE_ALL_USERS`: Controls whether any Organization Admin can view all users and teams, even those not associated with their Organization. (boolean, required)\n* `MANAGE_ORGANIZATION_AUTH`: Controls whether any Organization Admin has the privileges to create and manage users and teams. You may want to disable this ability if you are using an LDAP or SAML integration. (boolean, required)\n* `TOWER_URL_BASE`: This setting is used by services like notifications to render a valid URL to the service. (string, required)\n* `REMOTE_HOST_HEADERS`: HTTP headers and meta keys to search to determine remote host name or IP. Add additional items to this list, such as "HTTP_X_FORWARDED_FOR", if behind a reverse proxy. See the "Proxy Support" section of the AAP Installation guide for more details. (list, required)\n* `PROXY_IP_ALLOWED_LIST`: If the service is behind a reverse proxy/load balancer, use this setting to configure the proxy IP addresses from which the service should trust custom REMOTE_HOST_HEADERS header values. If this setting is an empty list (the default), the headers specified by REMOTE_HOST_HEADERS will be trusted unconditionally. (list, required)\n* `CSRF_TRUSTED_ORIGINS`: If the service is behind a reverse proxy/load balancer, use this setting to configure the scheme://addresses from which the service should trust Origin header values. (list, default=`[]`)\n\n* `REDHAT_USERNAME`: This username is used to send data to Automation Analytics (string, default=`\"\"`)\n* `REDHAT_PASSWORD`: This password is used to send data to Automation Analytics (string, default=`\"\"`)\n* `SUBSCRIPTIONS_USERNAME`: This username is used to retrieve subscription and content information (string, default=`\"\"`)\n* `SUBSCRIPTIONS_PASSWORD`: This password is used to retrieve subscription and content information (string, default=`\"\"`)\n* `AUTOMATION_ANALYTICS_URL`: This setting is used to configure the upload URL for data collection for Automation Analytics. (string, default=`\"https://example.com\"`)\n\n\n\n* `DEFAULT_EXECUTION_ENVIRONMENT`: The Execution Environment to be used when one has not been configured for a job template. (field, default=`None`)\n* `CUSTOM_VENV_PATHS`: Paths where Tower will look for custom virtual environments (in addition to /var/lib/awx/venv/).
Enter one path per line. (list, default=`[]`)\n* `AD_HOC_COMMANDS`: List of modules allowed to be used by ad-hoc jobs. (list, default=`['command', 'shell', 'yum', 'apt', 'apt_key', 'apt_repository', 'apt_rpm', 'service', 'group', 'user', 'mount', 'ping', 'selinux', 'setup', 'win_ping', 'win_service', 'win_updates', 'win_group', 'win_user']`)\n* `ALLOW_JINJA_IN_EXTRA_VARS`: Ansible allows variable substitution via the Jinja2 templating language for --extra-vars. This poses a potential security risk where users with the ability to specify extra vars at job launch time can use Jinja2 templates to run arbitrary Python. It is recommended that this value be set to "template" or "never". (choice, required)\n - `always`: Always\n - `never`: Never\n - `template`: Only On Job Template Definitions (default)\n* `AWX_ISOLATION_BASE_PATH`: The directory in which the service will create new temporary directories for job execution and isolation (such as credential files). (string, required)\n* `AWX_ISOLATION_SHOW_PATHS`: List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. Volumes will be mounted from the execution node to the container. The supported format is HOST-DIR[:CONTAINER-DIR[:OPTIONS]]. (list, default=`[]`)\n* `AWX_TASK_ENV`: Additional environment variables set for playbook runs, inventory updates, project updates, and notification sending. (nested object, default=`{}`)\n* `AWX_RUNNER_KEEPALIVE_SECONDS`: Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open. (integer, required)\n* `GALAXY_TASK_ENV`: Additional environment variables set for invocations of ansible-galaxy within project updates. Useful if you must use a proxy server for ansible-galaxy but not git. (nested object, required)\n* `INSIGHTS_TRACKING_STATE`: Enables the service to gather data on automation and send it to Automation Analytics. (boolean, default=`False`)\n* `PROJECT_UPDATE_VVV`: Adds the CLI -vvv flag to ansible-playbook runs of project_update.yml used for project updates. (boolean, required)\n* `AWX_ROLES_ENABLED`: Allows roles to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean, default=`True`)\n* `AWX_COLLECTIONS_ENABLED`: Allows collections to be dynamically downloaded from a requirements.yml file for SCM projects. (boolean, default=`True`)\n* `AWX_SHOW_PLAYBOOK_LINKS`: Follow symbolic links when scanning for playbooks. Be aware that setting this to True can lead to infinite recursion if a link points to a parent directory of itself. (boolean, default=`False`)\n* `AWX_MOUNT_ISOLATED_PATHS_ON_K8S`: Expose paths via hostPath for the Pods created by a Container Group. HostPath volumes present many security risks, and it is a best practice to avoid the use of HostPaths when possible. (boolean, default=`False`)\n* `GALAXY_IGNORE_CERTS`: If set to true, certificate validation will not be done when installing content from any Galaxy server. (boolean, default=`False`)\n* `STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display before requiring the output be downloaded. (integer, required)\n* `EVENT_STDOUT_MAX_BYTES_DISPLAY`: Maximum Size of Standard Output in bytes to display for a single job or ad hoc command event. `stdout` will end with `\u2026` when truncated. (integer, required)\n* `MAX_WEBSOCKET_EVENT_RATE`: Maximum number of messages to update the UI live job output with per second. Value of 0 means no limit. 
(integer, default=`30`)\n* `SCHEDULE_MAX_JOBS`: Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created. (integer, required)\n* `AWX_ANSIBLE_CALLBACK_PLUGINS`: List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line. (list, default=`[]`)\n* `DEFAULT_JOB_TIMEOUT`: Maximum time in seconds to allow jobs to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual job template will override this. (integer, default=`0`)\n* `DEFAULT_JOB_IDLE_TIMEOUT`: If no output is detected from ansible in this number of seconds the execution will be terminated. Use value of 0 to indicate that no idle timeout should be imposed. (integer, default=`0`)\n* `DEFAULT_INVENTORY_UPDATE_TIMEOUT`: Maximum time in seconds to allow inventory updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual inventory source will override this. (integer, default=`0`)\n* `DEFAULT_PROJECT_UPDATE_TIMEOUT`: Maximum time in seconds to allow project updates to run. Use value of 0 to indicate that no timeout should be imposed. A timeout set on an individual project will override this. (integer, default=`0`)\n* `ANSIBLE_FACT_CACHE_TIMEOUT`: Maximum time, in seconds, that stored Ansible facts are considered valid since the last time they were modified. Only valid, non-stale, facts will be accessible by a playbook. Note, this does not influence the deletion of ansible_facts from the database. Use a value of 0 to indicate that no timeout should be imposed. (integer, default=`0`)\n* `MAX_FORKS`: Saving a Job Template with more than this number of forks will result in an error. When set to 0, no limit is applied. (integer, default=`200`)\n* `LOG_AGGREGATOR_HOST`: Hostname/IP where external logs will be sent to. (string, default=`\"\"`)\n* `LOG_AGGREGATOR_PORT`: Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator). (integer, default=`None`)\n* `LOG_AGGREGATOR_TYPE`: Format messages for the chosen log aggregator. (choice)\n - `None`: --------- (default)\n - `logstash`\n - `splunk`\n - `loggly`\n - `sumologic`\n - `other`\n* `LOG_AGGREGATOR_USERNAME`: Username for external log aggregator (if required; HTTP/s only). (string, default=`\"\"`)\n* `LOG_AGGREGATOR_PASSWORD`: Password or authentication token for external log aggregator (if required; HTTP/s only). (string, default=`\"\"`)\n* `LOG_AGGREGATOR_LOGGERS`: List of loggers that will send HTTP logs to the collector, these can include any or all of: \nawx - service logs\nactivity_stream - activity stream records\njob_events - callback data from Ansible job events\nsystem_tracking - facts gathered from scan jobs\nbroadcast_websocket - errors pertaining to websockets broadcast metrics\n (list, default=`['awx', 'activity_stream', 'job_events', 'system_tracking', 'broadcast_websocket']`)\n* `LOG_AGGREGATOR_INDIVIDUAL_FACTS`: If set, system tracking facts will be sent for each package, service, or other item found in a scan, allowing for greater search query granularity. If unset, facts will be sent as a single dictionary, allowing for greater efficiency in fact processing. (boolean, default=`False`)\n* `LOG_AGGREGATOR_ENABLED`: Enable sending logs to external log aggregator. (boolean, default=`False`)\n* `LOG_AGGREGATOR_TOWER_UUID`: Useful to uniquely identify instances. 
(string, default=`\"\"`)\n* `LOG_AGGREGATOR_PROTOCOL`: Protocol used to communicate with log aggregator. HTTPS/HTTP assumes HTTPS unless http:// is explicitly used in the Logging Aggregator hostname. (choice)\n - `https`: HTTPS/HTTP (default)\n - `tcp`: TCP\n - `udp`: UDP\n* `LOG_AGGREGATOR_TCP_TIMEOUT`: Number of seconds for a TCP connection to external log aggregator to timeout. Applies to HTTPS and TCP log aggregator protocols. (integer, default=`5`)\n* `LOG_AGGREGATOR_VERIFY_CERT`: Flag to control enable/disable of certificate verification when LOG_AGGREGATOR_PROTOCOL is "https". If enabled, the log handler will verify certificate sent by external log aggregator before establishing connection. (boolean, default=`True`)\n* `LOG_AGGREGATOR_LEVEL`: Level threshold used by log handler. Severities from lowest to highest are DEBUG, INFO, WARNING, ERROR, CRITICAL. Messages less severe than the threshold will be ignored by log handler. (messages under category awx.analytics ignore this setting) (choice)\n - `DEBUG`\n - `INFO` (default)\n - `WARNING`\n - `ERROR`\n - `CRITICAL`\n* `LOG_AGGREGATOR_ACTION_QUEUE_SIZE`: Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5). (integer, default=`131072`)\n* `LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB`: Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH. (integer, default=`1`)\n* `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH`: Location to persist logs that should be retried after an outage of the external log aggregator (defaults to /var/lib/awx). Equivalent to the rsyslogd queue.spoolDirectory setting. (string, default=`\"/var/lib/awx\"`)\n* `LOG_AGGREGATOR_RSYSLOGD_DEBUG`: Enables high verbosity debugging for rsyslogd. Useful for debugging connection issues for external log aggregation. (boolean, default=`False`)\n* `API_400_ERROR_LOG_FORMAT`: The format of logged messages when an API 4XX error occurs; the following variables will be substituted: \nstatus_code - The HTTP status code of the error\nuser_name - The user name attempting to use the API\nurl_path - The URL path to the API endpoint called\nremote_addr - The remote address seen for the user\nerror - The error set by the API endpoint\nVariables need to be in the format {<variable name>}. (string, default=`\"status {status_code} received by user {user_name} attempting to access {url_path} from {remote_addr}\"`)\n* `AUTOMATION_ANALYTICS_LAST_GATHER`: (datetime, required)\n* `AUTOMATION_ANALYTICS_LAST_ENTRIES`: (string, default=`\"\"`)\n* `AUTOMATION_ANALYTICS_GATHER_INTERVAL`: Interval (in seconds) between data gathering. (integer, default=`14400`)\n\n* `BULK_JOB_MAX_LAUNCH`: Max jobs to allow bulk jobs to launch (integer, default=`100`)\n* `BULK_HOST_MAX_CREATE`: Max number of hosts to allow to be created in a single bulk action (integer, default=`100`)\n* `UI_NEXT`: Enable preview of new user interface. (boolean, default=`True`)\n* `SUBSCRIPTION_USAGE_MODEL`: (choice)\n - `\"\"`: Default model for AWX - no subscription. 
Deletion of host_metrics will not be considered for purposes of managed host counting (default)\n - `unique_managed_hosts`: Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes\n* `CLEANUP_HOST_METRICS_LAST_TS`: (datetime, required)\n* `HOST_METRIC_SUMMARY_TASK_LAST_TS`: (datetime, required)\n* `AWX_CLEANUP_PATHS`: Enable or disable TMP dir cleanup (boolean, default=`True`)\n* `AWX_REQUEST_PROFILE`: Debug web request Python timing (boolean, default=`False`)\n* `DEFAULT_CONTAINER_RUN_OPTIONS`: List of options to pass to podman run. Example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug'] (list, default=`['--network', 'slirp4netns:enable_ipv6=true']`)\n* `RECEPTOR_RELEASE_WORK`: Release receptor work (boolean, default=`True`)\n* `SESSION_COOKIE_AGE`: Number of seconds that a user is inactive before they will need to login again. (integer, required)\n* `SESSIONS_PER_USER`: Maximum number of simultaneous logged-in sessions a user may have. To disable, enter -1. (integer, required)\n* `DISABLE_LOCAL_AUTH`: Controls whether users are prevented from using the built-in authentication system. You probably want to do this if you are using an LDAP or SAML integration. (boolean, required)\n* `AUTH_BASIC_ENABLED`: Enable HTTP Basic Auth for the API Browser. (boolean, required)\n* `OAUTH2_PROVIDER`: Dictionary for customizing OAuth 2 timeouts; available items are `ACCESS_TOKEN_EXPIRE_SECONDS`, the duration of access tokens in the number of seconds, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, the duration of authorization codes in the number of seconds, and `REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after expired access tokens, in the number of seconds. (nested object, default=`{'ACCESS_TOKEN_EXPIRE_SECONDS': 31536000000, 'AUTHORIZATION_CODE_EXPIRE_SECONDS': 600, 'REFRESH_TOKEN_EXPIRE_SECONDS': 2628000}`)\n* `ALLOW_OAUTH2_FOR_EXTERNAL_USERS`: For security reasons, users from external auth providers (LDAP, SAML, SSO, Radius, and others) are not allowed to create OAuth2 tokens. To change this behavior, enable this setting. Existing tokens will not be deleted when this setting is toggled off. (boolean, default=`False`)\n* `LOGIN_REDIRECT_OVERRIDE`: URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page. (string, default=`\"\"`)\n* `ALLOW_METRICS_FOR_ANONYMOUS_USERS`: If true, anonymous users are allowed to poll metrics. (boolean, default=`False`)\n\n* `CUSTOM_LOGIN_INFO`: If needed, you can add specific information (such as a legal notice or a disclaimer) to a text box in the login modal using this setting. Any content added must be in plain text or an HTML fragment, as other markup languages are not supported. (string, default=`\"\"`)\n* `CUSTOM_LOGO`: To set up a custom logo, provide a file that you create. For the custom logo to look its best, use a .png file with a transparent background. GIF, PNG and JPEG formats are supported. (string, default=`\"\"`)\n* `MAX_UI_JOB_EVENTS`: Maximum number of job events for the UI to retrieve within a single request. (integer, required)\n* `UI_LIVE_UPDATES_ENABLED`: If disabled, the page will not refresh when events are received. Reloading the page will be required to get the latest details. (boolean, required)\n\n* `SOCIAL_AUTH_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. 
This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_USER_FIELDS`: When set to an empty list `[]`, this setting prevents new user accounts from being created. Only users who have previously logged in using social auth or have a user account with a matching email address will be able to login. (list, default=`None`)\n* `SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL`: Enabling this setting will tell social auth to use the full Email as username instead of the full name (boolean, default=`False`)\n* `AUTH_LDAP_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_1_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_1_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_1_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_1_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_1_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_1_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_1_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. 
This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_1_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_1_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_1_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_1_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_1_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_1_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_1_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_1_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_1_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_2_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_2_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_2_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_2_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. 
(boolean, default=`False`)\n* `AUTH_LDAP_2_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_2_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_2_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_2_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_2_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_2_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_2_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_2_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_2_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_2_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_2_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_2_TEAM_MAP`: Mapping between team members (users) and LDAP groups. 
Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_3_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_3_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_3_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_3_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_3_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_3_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_3_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_3_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_3_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_3_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_3_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_3_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. 
Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_3_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_3_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_3_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_3_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_4_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_4_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_4_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_4_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_4_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_4_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_4_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_4_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_4_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_4_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. 
Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_4_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_4_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_4_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_4_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_4_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_4_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_5_SERVER_URI`: URI to connect to LDAP server, such as "ldap://ldap.example.com:389" (non-SSL) or "ldaps://ldap.example.com:636" (SSL). Multiple LDAP servers may be specified by separating with spaces or commas. LDAP authentication is disabled if this parameter is empty. (string, default=`\"\"`)\n* `AUTH_LDAP_5_BIND_DN`: DN (Distinguished Name) of user to bind for all search queries. This is the system user account we will use to login to query LDAP for other user information. Refer to the documentation for example syntax. (string, default=`\"\"`)\n* `AUTH_LDAP_5_BIND_PASSWORD`: Password used to bind LDAP user account. (string, default=`\"\"`)\n* `AUTH_LDAP_5_START_TLS`: Whether to enable TLS when the LDAP connection is not using SSL. (boolean, default=`False`)\n* `AUTH_LDAP_5_CONNECTION_OPTIONS`: Additional options to set for the LDAP connection. LDAP referrals are disabled by default (to prevent certain LDAP queries from hanging with AD). Option names should be strings (e.g. "OPT_REFERRALS"). Refer to https://www.python-ldap.org/doc/html/ldap.html#options for possible options and values that can be set. (nested object, default=`{'OPT_REFERRALS': 0, 'OPT_NETWORK_TIMEOUT': 30}`)\n* `AUTH_LDAP_5_USER_SEARCH`: LDAP search query to find users. Any user that matches the given pattern will be able to login to the service. The user should also be mapped into an organization (as defined in the AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries need to be supported use of "LDAPUnion" is possible. See the documentation for details. (list, default=`[]`)\n* `AUTH_LDAP_5_USER_DN_TEMPLATE`: Alternative to user search, if user DNs are all of the same format. 
This approach is more efficient for user lookups than searching if it is usable in your organizational environment. If this setting has a value it will be used instead of AUTH_LDAP_USER_SEARCH. (string, default=`\"\"`)\n* `AUTH_LDAP_5_USER_ATTR_MAP`: Mapping of LDAP user schema to API user attributes. The default setting is valid for ActiveDirectory but users with other LDAP configurations may need to change the values. Refer to the documentation for additional details. (nested object, default=`{}`)\n* `AUTH_LDAP_5_GROUP_SEARCH`: Users are mapped to organizations based on their membership in LDAP groups. This setting defines the LDAP search query to find groups. Unlike the user search, group search does not support LDAPSearchUnion. (list, default=`[]`)\n* `AUTH_LDAP_5_GROUP_TYPE`: The group type may need to be changed based on the type of the LDAP server. Values are listed at: https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups (choice)\n - `PosixGroupType`\n - `GroupOfNamesType`\n - `GroupOfUniqueNamesType`\n - `ActiveDirectoryGroupType`\n - `OrganizationalRoleGroupType`\n - `MemberDNGroupType` (default)\n - `NestedGroupOfNamesType`\n - `NestedGroupOfUniqueNamesType`\n - `NestedActiveDirectoryGroupType`\n - `NestedOrganizationalRoleGroupType`\n - `NestedMemberDNGroupType`\n - `PosixUIDGroupType`\n* `AUTH_LDAP_5_GROUP_TYPE_PARAMS`: Key value parameters to send the chosen group type init method. (nested object, default=`OrderedDict([('member_attr', 'member'), ('name_attr', 'cn')])`)\n* `AUTH_LDAP_5_REQUIRE_GROUP`: Group DN required to login. If specified, user must be a member of this group to login via LDAP. If not set, everyone in LDAP that matches the user search will be able to login to the service. Only one require group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_5_DENY_GROUP`: Group DN denied from login. If specified, user will not be allowed to login if a member of this group. Only one deny group is supported. (string, default=`\"\"`)\n* `AUTH_LDAP_5_USER_FLAGS_BY_GROUP`: Retrieve users from a given group. At this time, superuser and system auditors are the only groups supported. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `AUTH_LDAP_5_ORGANIZATION_MAP`: Mapping between organization admins/users and LDAP groups. This controls which users are placed into which organizations relative to their LDAP group memberships. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `AUTH_LDAP_5_TEAM_MAP`: Mapping between team members (users) and LDAP groups. Configuration details are available in the documentation. (nested object, default=`{}`)\n* `RADIUS_SERVER`: Hostname/IP of RADIUS server. RADIUS authentication is disabled if this setting is empty. (string, default=`\"\"`)\n* `RADIUS_PORT`: Port of RADIUS server. (integer, default=`1812`)\n* `RADIUS_SECRET`: Shared secret for authenticating to RADIUS server. (string, default=`\"\"`)\n* `TACACSPLUS_HOST`: Hostname of TACACS+ server. (string, default=`\"\"`)\n* `TACACSPLUS_PORT`: Port number of TACACS+ server. (integer, default=`49`)\n* `TACACSPLUS_SECRET`: Shared secret for authenticating to TACACS+ server. (string, default=`\"\"`)\n* `TACACSPLUS_SESSION_TIMEOUT`: TACACS+ session timeout value in seconds, 0 disables timeout. (integer, default=`5`)\n* `TACACSPLUS_AUTH_PROTOCOL`: Choose the authentication protocol used by TACACS+ client. 
(choice)\n - `ascii` (default)\n - `pap`\n* `TACACSPLUS_REM_ADDR`: Enable sending of the client address by the TACACS+ client. (boolean, default=`False`)\n\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_KEY`: The OAuth2 key from your web application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET`: The OAuth2 secret from your web application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS`: Update this setting to restrict the domains that are allowed to login using Google OAuth2. (list, default=`[]`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS`: Extra arguments for Google OAuth2 login. You can restrict it to only allow a single domain to authenticate, even if the user is logged in with multiple Google accounts. Refer to the documentation for more detail. (nested object, default=`{}`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_KEY`: The OAuth2 key (Client ID) from your GitHub developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_SECRET`: The OAuth2 secret (Client Secret) from your GitHub developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORG_NAME`: The name of your GitHub organization, as used in your organization's URL: https://github.com/<yourorg>/. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_TEAM_ID`: Find the numeric team ID using the Github API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. 
(string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise developer application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME`: The name of your GitHub Enterprise organization, as used in your organization's URL: https://github.com/<yourorg>/. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL`: The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL`: The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. 
Refer to Github Enterprise documentation for more details. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY`: The OAuth2 key (Client ID) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SECRET`: The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID`: Find the numeric team ID using the Github Enterprise API: http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/. (string, default=`\"\"`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_KEY`: The OAuth2 key (Client ID) from your Azure AD application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET`: The OAuth2 secret (Client Secret) from your Azure AD application. (string, default=`\"\"`)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_AZUREAD_OAUTH2_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_OIDC_KEY`: The OIDC key (Client ID) from your IDP. (string, default=`\"\"`)\n* `SOCIAL_AUTH_OIDC_SECRET`: The OIDC secret (Client Secret) from your IDP. (string, default=`\"\"`)\n* `SOCIAL_AUTH_OIDC_OIDC_ENDPOINT`: The URL for your OIDC provider including the path up to /.well-known/openid-configuration (string, default=`\"\"`)\n* `SOCIAL_AUTH_OIDC_VERIFY_SSL`: Verify the OIDC provider ssl certificate. (boolean, default=`True`)\n* `SAML_AUTO_CREATE_OBJECTS`: When enabled (the default), mapped Organizations and Teams will be created automatically on successful SAML login. (boolean, default=`True`)\n\n\n* `SOCIAL_AUTH_SAML_SP_ENTITY_ID`: The application-defined unique identifier used as the audience of the SAML service provider (SP) configuration. This is usually the URL for the service. (string, default=`\"\"`)\n* `SOCIAL_AUTH_SAML_SP_PUBLIC_CERT`: Create a keypair to use as a service provider (SP) and include the certificate content here. (string, required)\n* `SOCIAL_AUTH_SAML_SP_PRIVATE_KEY`: Create a keypair to use as a service provider (SP) and include the private key content here. (string, required)\n* `SOCIAL_AUTH_SAML_ORG_INFO`: Provide the URL, display name, and the name of your app. Refer to the documentation for example syntax. (nested object, required)\n* `SOCIAL_AUTH_SAML_TECHNICAL_CONTACT`: Provide the name and email address of the technical contact for your service provider. Refer to the documentation for example syntax. (nested object, required)\n* `SOCIAL_AUTH_SAML_SUPPORT_CONTACT`: Provide the name and email address of the support contact for your service provider. 
Refer to the documentation for example syntax. (nested object, required)\n* `SOCIAL_AUTH_SAML_ENABLED_IDPS`: Configure the Entity ID, SSO URL and certificate for each identity provider (IdP) in use. Multiple SAML IdPs are supported. Some IdPs may provide user data using attribute names that differ from the default OIDs. Attribute names may be overridden for each IdP. Refer to the Ansible documentation for additional details and syntax. (nested object, default=`{}`)\n* `SOCIAL_AUTH_SAML_SECURITY_CONFIG`: A dict of key value pairs that are passed to the underlying python-saml security setting https://github.com/onelogin/python-saml#settings (nested object, default=`{'requestedAuthnContext': False}`)\n* `SOCIAL_AUTH_SAML_SP_EXTRA`: A dict of key value pairs to be passed to the underlying python-saml Service Provider configuration setting. (nested object, default=`None`)\n* `SOCIAL_AUTH_SAML_EXTRA_DATA`: A list of tuples that maps IDP attributes to extra_attributes. Each attribute will be a list of values, even if only 1 value. (list, default=`None`)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_MAP`: Mapping to organization admins/users from social auth accounts. This setting\ncontrols which users are placed into which organizations based on their\nusername and email address. Configuration details are available in the\ndocumentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_SAML_TEAM_MAP`: Mapping of team members (users) from social auth accounts. Configuration\ndetails are available in the documentation. (nested object, default=`None`)\n* `SOCIAL_AUTH_SAML_ORGANIZATION_ATTR`: Used to translate user organization membership. (nested object, default=`{}`)\n* `SOCIAL_AUTH_SAML_TEAM_ATTR`: Used to translate user team membership. (nested object, default=`{}`)\n* `SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR`: Used to map super users and system auditors from SAML. (nested object, default=`{}`)\n* `LOCAL_PASSWORD_MIN_LENGTH`: Minimum number of characters required in a local password. 0 means no minimum (integer, default=`0`)\n* `LOCAL_PASSWORD_MIN_DIGITS`: Minimum number of digit characters required in a local password. 0 means no minimum (integer, default=`0`)\n* `LOCAL_PASSWORD_MIN_UPPER`: Minimum number of uppercase characters required in a local password. 0 means no minimum (integer, default=`0`)\n* `LOCAL_PASSWORD_MIN_SPECIAL`: Minimum number of special characters required in a local password. 
0 means no minimum (integer, default=`0`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Setting:\n\nMake a DELETE request to this resource to delete this setting.", + "operationId": "api_settings_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL": "https://towerhost/sso/complete/github-enterprise/", + "SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY": "", + "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP": null, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET": "", + "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP": null + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL": "", + "SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL": "https://towerhost/sso/complete/github-enterprise/", + "SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY": "", + "SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP": null, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET": "", + "SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP": null, + "SOCIAL_AUTH_GITHUB_ENTERPRISE_URL": "" + } + }, + "schema": { + "$ref": "#/definitions/SettingSingleton" + } + }, + "400": { + "examples": { + "application/json": { + "__all__": [ + "Cannot enable log aggregator without providing type." + ] + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single setting", + "tags": [ + "api" + ] + } + }, + "/api/v2/system_job_templates/": { + "get": { + "description": "system job templates.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of system job templates\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more system job template records. \n\n## Results\n\nEach system job template data structure includes the following fields:\n\n* `id`: Database ID for this system job template. (integer)\n* `type`: Data type for this system job template. (choice)\n* `url`: URL for this system job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this system job template was created. (datetime)\n* `modified`: Timestamp when this system job template was last modified. (datetime)\n* `name`: Name of this system job template. (string)\n* `description`: Optional description of this system job template. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `execution_environment`: The container image to be used for execution. 
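A minimal curl sketch of the PUT/PATCH semantics described above. The controller URL, token, and the `github-enterprise` settings slug are assumptions (the slug mirrors the `SOCIAL_AUTH_GITHUB_ENTERPRISE_*` example payload shown for api_settings_update); substitute values for your deployment:

    #!/bin/bash
    # Hypothetical controller URL and OAuth2 token -- replace both.
    HOST=https://towerhost
    TOKEN=REPLACE_ME
    # PATCH sends only the fields being modified; a PUT would need all fields.
    curl -s -X PATCH "$HOST/api/v2/settings/github-enterprise/" \
      -H "Authorization: Bearer $TOKEN" \
      -H "Content-Type: application/json" \
      -d '{"SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY": "", "SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET": ""}'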
(id)\n* `job_type`: (choice)\n - `\"\"`: ---------\n - `cleanup_jobs`: Remove jobs older than a certain number of days\n - `cleanup_activitystream`: Remove activity stream entries older than a certain number of days\n - `cleanup_sessions`: Removes expired browser sessions from the database\n - `cleanup_tokens`: Removes expired OAuth 2 access tokens and refresh tokens\n\n\n\n## Sorting\n\nTo specify that system job templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_job_templates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/SystemJobTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/system_job_templates/{id}/": { + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this system job template. (integer)\n* `type`: Data type for this system job template. (choice)\n* `url`: URL for this system job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this system job template was created. (datetime)\n* `modified`: Timestamp when this system job template was last modified. (datetime)\n* `name`: Name of this system job template. (string)\n* `description`: Optional description of this system job template. 
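A quick sketch of the `order_by`, `page_size`, `page`, and `search` parameters documented for the system job template list, reusing the hypothetical $HOST and $TOKEN from the earlier sketch:

    # Newest templates first, second page of 100 results.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "$HOST/api/v2/system_job_templates/?order_by=-modified&page_size=100&page=2"
    # Case-insensitive search across the designated text fields.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "$HOST/api/v2/system_job_templates/?search=cleanup"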
(string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `execution_environment`: The container image to be used for execution. (id)\n* `job_type`: (choice)\n - `\"\"`: ---------\n - `cleanup_jobs`: Remove jobs older than a certain number of days\n - `cleanup_activitystream`: Remove activity stream entries older than a certain number of days\n - `cleanup_sessions`: Removes expired browser sessions from the database\n - `cleanup_tokens`: Removes expired OAuth 2 access tokens and refresh tokens", + "operationId": "api_system_job_templates_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/SystemJobTemplate" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single system job template", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/system_job_templates/{id}/jobs/": { + "get": { + "description": "system jobs associated with the selected\nsystem job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of system jobs\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more system job records. \n\n## Results\n\nEach system job data structure includes the following fields:\n\n* `id`: Database ID for this system job. (integer)\n* `type`: Data type for this system job. (choice)\n* `url`: URL for this system job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this system job was created. (datetime)\n* `modified`: Timestamp when this system job was last modified. (datetime)\n* `name`: Name of this system job. (string)\n* `description`: Optional description of this system job. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. 
(decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `system_job_template`: (id)\n* `job_type`: (choice)\n - `\"\"`: ---------\n - `cleanup_jobs`: Remove jobs older than a certain number of days\n - `cleanup_activitystream`: Remove activity stream entries older than a certain number of days\n - `cleanup_sessions`: Removes expired browser sessions from the database\n - `cleanup_tokens`: Removes expired OAuth 2 access tokens and refresh tokens\n* `extra_vars`: (string)\n* `result_stdout`: (field)\n\n\n\n## Sorting\n\nTo specify that system jobs are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_job_templates_jobs_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/SystemJobList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/system_job_templates/{id}/launch/": { + "get": { + "description": "\nVariables specified inside of the parameter `extra_vars` are passed to the\nsystem job task as command line parameters. 
These tasks can be run manually\non the host system via the `awx-manage` command.\n\nFor example, for `cleanup_jobs` and `cleanup_activitystream`:\n\n`{\"extra_vars\": {\"days\": 30}}`\n\nThis will act on data older than 30 days.\n\nFor `cleanup_activitystream` and `cleanup_jobs` commands, providing\n`\"dry_run\": true` inside of `extra_vars` will show items that will be\nremoved without deleting them.\n\nEach individual system job task has its own default values, which are\napplicable either when running it from the command line or launching its\nsystem job template with empty `extra_vars`.\n\n - Defaults for `cleanup_activitystream`: days=90\n - Defaults for `cleanup_jobs`: days=90\n\nIf successful, the response status code will be 202. If the job cannot be\nlaunched, a 405 status code will be returned.", + "operationId": "api_system_job_templates_launch_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Empty" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a POST request to this resource to launch the system job template.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "\nVariables specified inside of the parameter `extra_vars` are passed to the\nsystem job task as command line parameters. These tasks can be run manually\non the host system via the `awx-manage` command.\n\nFor example, for `cleanup_jobs` and `cleanup_activitystream`:\n\n`{\"extra_vars\": {\"days\": 30}}`\n\nThis will act on data older than 30 days.\n\nFor `cleanup_activitystream` and `cleanup_jobs` commands, providing\n`\"dry_run\": true` inside of `extra_vars` will show items that will be\nremoved without deleting them.\n\nEach individual system job task has its own default values, which are\napplicable either when running it from the command line or launching its\nsystem job template with empty `extra_vars`.\n\n - Defaults for `cleanup_activitystream`: days=90\n - Defaults for `cleanup_jobs`: days=90\n\nIf successful, the response status code will be 202. 
If the job cannot be\nlaunched, a 405 status code will be returned.", + "operationId": "api_system_job_templates_launch_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Empty" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Empty" + } + } + }, + "summary": "Make a POST request to this resource to launch the system job template.", + "tags": [ + "api" + ] + } + }, + "/api/v2/system_job_templates/{id}/notification_templates_error/": { + "get": { + "description": "notification templates associated with the selected\nsystem job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_job_templates_notification_templates_error_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\nsystem job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. 
(json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_job_templates_notification_templates_error_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/system_job_templates/{id}/notification_templates_started/": { + "get": { + "description": "notification templates associated with the selected\nsystem job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. 
(json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_job_templates_notification_templates_started_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\nsystem job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. 
(string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_job_templates_notification_templates_started_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/system_job_templates/{id}/notification_templates_success/": { + "get": { + "description": "notification templates associated with the selected\nsystem job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. 
(string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_job_templates_notification_templates_success_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\nsystem job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_job_templates_notification_templates_success_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/system_job_templates/{id}/schedules/": { + "get": { + "description": "schedules associated with the selected\nsystem job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of schedules\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more schedule records. \n\n## Results\n\nEach schedule data structure includes the following fields:\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. 
(string)\n* `description`: Optional description of this schedule. (string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory. (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time, after which the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n## Sorting\n\nTo specify that schedules are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_job_templates_schedules_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Schedule" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "schedules associated with the selected\nsystem job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of schedules\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more schedule records. \n\n## Results\n\nEach schedule data structure includes the following fields:\n\n* `rrule`: A value representing the schedule's iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. 
(string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory. (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time, after which the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n## Sorting\n\nTo specify that schedules are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_job_templates_schedules_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Schedule" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Schedule" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/system_jobs/": { + "get": { + "description": "system jobs.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of system jobs\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more system job records. \n\n## Results\n\nEach system job data structure includes the following fields:\n\n* `id`: Database ID for this system job. (integer)\n* `type`: Data type for this system job. (choice)\n* `url`: URL for this system job. 
(string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this system job was created. (datetime)\n* `modified`: Timestamp when this system job was last modified. (datetime)\n* `name`: Name of this system job. (string)\n* `description`: Optional description of this system job. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `system_job_template`: (id)\n* `job_type`: (choice)\n - `\"\"`: ---------\n - `cleanup_jobs`: Remove jobs older than a certain number of days\n - `cleanup_activitystream`: Remove activity stream entries older than a certain number of days\n - `cleanup_sessions`: Removes expired browser sessions from the database\n - `cleanup_tokens`: Removes expired OAuth 2 access tokens and refresh tokens\n* `extra_vars`: (string)\n* `result_stdout`: (field)\n\n\n\n## Sorting\n\nTo specify that system jobs are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_jobs_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/SystemJobList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/system_jobs/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this system job. (integer)\n* `type`: Data type for this system job. (choice)\n* `url`: URL for this system job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this system job was created. (datetime)\n* `modified`: Timestamp when this system job was last modified. (datetime)\n* `name`: Name of this system job. (string)\n* `description`: Optional description of this system job. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. 
(string)\n* `result_traceback`: (string)\n* `event_processing_finished`: Indicates whether all of the events generated by this unified job have been saved to the database. (boolean)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `system_job_template`: (id)\n* `job_type`: (choice)\n - `\"\"`: ---------\n - `cleanup_jobs`: Remove jobs older than a certain number of days\n - `cleanup_activitystream`: Remove activity stream entries older than a certain number of days\n - `cleanup_sessions`: Removes expired browser sessions from the database\n - `cleanup_tokens`: Removes expired OAuth 2 access tokens and refresh tokens\n* `extra_vars`: (string)\n* `result_stdout`: (field)\n\n\n\n\n\n# Delete a System Job:\n\nMake a DELETE request to this resource to delete this system job.", + "operationId": "api_system_jobs_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "403": { + "examples": { + "application/json": { + "detail": "Cannot delete running job resource." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single system job", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this system job. (integer)\n* `type`: Data type for this system job. (choice)\n* `url`: URL for this system job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this system job was created. (datetime)\n* `modified`: Timestamp when this system job was last modified. (datetime)\n* `name`: Name of this system job. (string)\n* `description`: Optional description of this system job. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `result_traceback`: (string)\n* `event_processing_finished`: Indicates whether all of the events generated by this unified job have been saved to the database. (boolean)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. 
(string)\n* `system_job_template`: (id)\n* `job_type`: (choice)\n - `\"\"`: ---------\n - `cleanup_jobs`: Remove jobs older than a certain number of days\n - `cleanup_activitystream`: Remove activity stream entries older than a certain number of days\n - `cleanup_sessions`: Removes expired browser sessions from the database\n - `cleanup_tokens`: Removes expired OAuth 2 access tokens and refresh tokens\n* `extra_vars`: (string)\n* `result_stdout`: (field)\n\n\n\n\n\n# Delete a System Job:\n\nMake a DELETE request to this resource to delete this system job.", + "operationId": "api_system_jobs_read", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "canceled_on": null, + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "elapsed": 0.0, + "event_processing_finished": false, + "execution_environment": null, + "execution_node": "", + "extra_vars": "", + "failed": false, + "finished": null, + "id": 1, + "job_args": "", + "job_cwd": "", + "job_env": {}, + "job_explanation": "", + "job_type": "", + "launch_type": "manual", + "launched_by": {}, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "", + "related": { + "cancel": "/api/v2/system_jobs/1/cancel/", + "events": "/api/v2/system_jobs/1/events/" + }, + "result_stdout": "Standard Output too large to display (1048577 bytes), only download supported for sizes over 1048576 bytes.", + "result_traceback": "", + "started": null, + "status": "new", + "summary_fields": { + "user_capabilities": { + "delete": true, + "start": false + } + }, + "system_job_template": null, + "type": "system_job", + "unified_job_template": null, + "url": "/api/v2/system_jobs/1/", + "work_unit_id": null + } + }, + "schema": { + "$ref": "#/definitions/SystemJob" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single system job", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/system_jobs/{id}/cancel/": { + "get": { + "description": "record containing the following fields:\n\n* `can_cancel`: (boolean)", + "operationId": "api_system_jobs_cancel_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/SystemJobCancel" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single system job", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "record containing the following fields:\n\n* `can_cancel`: (boolean)", + "operationId": "api_system_jobs_cancel_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/SystemJobCancel" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/SystemJobCancel" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single system job", + "tags": [ + "api" + ] + } + }, + "/api/v2/system_jobs/{id}/events/": { + "get": { + "description": "system job events associated with the selected\nsystem job.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n 
]\n }\n\nThe `count` field indicates the total number of system job events\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more system job event records. \n\n## Results\n\nEach system job event data structure includes the following fields:\n\n* `id`: Database ID for this system job event. (integer)\n* `type`: Data type for this system job event. (choice)\n* `url`: URL for this system job event. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this system job event was created. (datetime)\n* `modified`: Timestamp when this system job event was last modified. (datetime)\n* `event`: (field)\n* `counter`: (integer)\n* `event_display`: (string)\n* `event_data`: (json)\n* `failed`: (field)\n* `changed`: (field)\n* `uuid`: (string)\n* `stdout`: (string)\n* `start_line`: (integer)\n* `end_line`: (integer)\n* `verbosity`: (integer)\n* `system_job`: (id)\n\n\n\n## Sorting\n\nTo specify that system job events are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_jobs_events_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/SystemJobEvent" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/system_jobs/{id}/notifications/": { + "get": { + "description": "notifications associated with the 
selected\nsystem job.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notifications\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification records. \n\n## Results\n\nEach notification data structure includes the following fields:\n\n* `id`: Database ID for this notification. (integer)\n* `type`: Data type for this notification. (choice)\n* `url`: URL for this notification. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification was created. (datetime)\n* `modified`: Timestamp when this notification was last modified. (datetime)\n* `notification_template`: (id)\n* `error`: (string)\n* `status`: (choice)\n - `pending`: Pending\n - `successful`: Successful\n - `failed`: Failed\n* `notifications_sent`: (integer)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `recipients`: (string)\n* `subject`: (string)\n* `body`: Notification body (json)\n\n\n\n## Sorting\n\nTo specify that notifications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_system_jobs_notifications_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Notification" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/teams/": { + "get": { + "description": "teams.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of teams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more team records. \n\n## Results\n\nEach team data structure includes the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. (choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. (string)\n* `description`: Optional description of this team. (string)\n* `organization`: (id)\n\n\n\n## Sorting\n\nTo specify that teams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_teams_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Team" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of teams.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of teams\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more team records.\n\n## Results\n\nEach team data structure includes the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. (choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. (string)\n* `description`: Optional description of this team. (string)\n* `organization`: (id)\n\n\n\n## Sorting\n\nTo specify that teams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_teams_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Team" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Team" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of teams.", + "tags": [ + "api" + ] + } + }, + "/api/v2/teams/{id}/": { + "delete": { + "description": "The record contains the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. (choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. (string)\n* `description`: Optional description of this team. (string)\n* `organization`: (id)\n\n\n\n\n\n# Update a Team:\n\nMake a PUT or PATCH request to this resource to update this\nteam. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this team. (string, required)\n* `description`: Optional description of this team. (string, default=`\"\"`)\n* `organization`: (id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Team:\n\nMake a DELETE request to this resource to delete this team.", + "operationId": "api_teams_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make a GET request to this resource to retrieve a single team record.", + "tags": [ + "api" + ] + }, + "get": { + "description": "The record contains the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. (choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. (string)\n* `description`: Optional description of this team. (string)\n* `organization`: (id)\n\n\n\n\n\n# Update a Team:\n\nMake a PUT or PATCH request to this resource to update this\nteam. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this team. (string, required)\n* `description`: Optional description of this team. 
(string, default=`\"\"`)\n* `organization`: (id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Team:\n\nMake a DELETE request to this resource to delete this team.", + "operationId": "api_teams_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Team" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a single team record.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "The record contains the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. (choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. (string)\n* `description`: Optional description of this team. (string)\n* `organization`: (id)\n\n\n\n\n\n# Update a Team:\n\nMake a PUT or PATCH request to this resource to update this\nteam. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this team. (string, required)\n* `description`: Optional description of this team. (string, default=`\"\"`)\n* `organization`: (id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Team:\n\nMake a DELETE request to this resource to delete this team.", + "operationId": "api_teams_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Team" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Team" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a single team record.", + "tags": [ + "api" + ] + }, + "put": { + "description": "The record contains the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. (choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. (string)\n* `description`: Optional description of this team. (string)\n* `organization`: (id)\n\n\n\n\n\n# Update a Team:\n\nMake a PUT or PATCH request to this resource to update this\nteam. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this team. (string, required)\n* `description`: Optional description of this team. 
(string, default=`\"\"`)\n* `organization`: (id, required)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Team:\n\nMake a DELETE request to this resource to delete this team.", + "operationId": "api_teams_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Team" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/Team" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a single team record.", + "tags": [ + "api" + ] + } + }, + "/api/v2/teams/{id}/access_list/": { + "get": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records.\n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_teams_access_list_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ResourceAccessListElement" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of users.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/teams/{id}/activity_stream/": { + "get": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records.\n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. 
(string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_teams_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of activity streams associated with the selected team.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/teams/{id}/credentials/": { + "get": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records.\n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_teams_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "cloud": false, + "created": "2018-02-01T08:00:00.000000Z", + "credential_type": 1, + "description": "", + "id": 1, + "inputs": { + "username": "someusername" + }, + "kind": "ssh", + "kubernetes": false, + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "Some name", + "related": { + "access_list": "/api/v2/credentials/1/access_list/", + "activity_stream": "/api/v2/credentials/1/activity_stream/", + "copy": "/api/v2/credentials/1/copy/", + "credential_type": "/api/v2/credential_types/1/", + "input_sources": "/api/v2/credentials/1/input_sources/", + "object_roles": "/api/v2/credentials/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "owner_teams": "/api/v2/credentials/1/owner_teams/", + "owner_users": "/api/v2/credentials/1/owner_users/" + }, + "summary_fields": { + "credential_type": { + "description": "", + "id": 1, + "name": "Machine" + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the credential", + "id": 19, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the credential", + "id": 21, + "name": "Read" + }, + "use_role": { + "description": "Can use the credential in a job template", + "id": 20, + "name": "Use" + } + }, + "organization": { + 
"description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "owners": [ + { + "description": "test-org-desc", + "id": 1, + "name": "test-org", + "type": "organization", + "url": "/api/v2/organizations/1/" + }, + { + "description": "", + "id": 1, + "name": "test-team", + "type": "team", + "url": "/api/v2/teams/1/" + } + ], + "user_capabilities": { + "copy": false, + "delete": false, + "edit": false, + "use": true + } + }, + "type": "credential", + "url": "/api/v2/credentials/1/" + } + ] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/TeamCredentialSerializerCreate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credentials associated with the selected\nteam.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_teams_credentials_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "credential_type": 1, + "inputs": { + "username": "someusername" + }, + "name": "Some name", + "team": 1 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "cloud": false, + "created": "2018-02-01T08:00:00.000000Z", + "credential_type": 1, + "description": "", + "id": 1, + "inputs": { + "username": "someusername" + }, + "kind": "ssh", + "kubernetes": false, + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "Some name", + "related": { + "access_list": "/api/v2/credentials/1/access_list/", + "activity_stream": "/api/v2/credentials/1/activity_stream/", + "copy": "/api/v2/credentials/1/copy/", + "credential_type": "/api/v2/credential_types/1/", + "input_sources": "/api/v2/credentials/1/input_sources/", + "object_roles": "/api/v2/credentials/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "owner_teams": "/api/v2/credentials/1/owner_teams/", + "owner_users": "/api/v2/credentials/1/owner_users/" + }, + "summary_fields": { + "credential_type": { + "description": "", + "id": 1, + "name": "Machine" + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the credential", + "id": 19, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the credential", + "id": 21, + "name": "Read" + }, + "use_role": { + "description": "Can use the credential in a job template", + "id": 20, + "name": "Use" + } + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "owners": [ + { + "description": "test-org-desc", + "id": 1, + "name": "test-org", + "type": "organization", + "url": "/api/v2/organizations/1/" + }, + { + "description": "", + "id": 1, + "name": "test-team", + "type": "team", + "url": "/api/v2/teams/1/" + } + ], + "user_capabilities": { + "copy": false, + "delete": true, + "edit": true, + "use": true + } + }, + "type": "credential", + "url": "/api/v2/credentials/1/" + } + }, + "schema": { + "$ref": "#/definitions/TeamCredentialSerializerCreate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of credentials associated with the selected team.", + "tags": [ + "api" + ] + } + }, + "/api/v2/teams/{id}/object_roles/": { + "get": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records.\n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. 
(string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_teams_object_roles_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Role" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of roles associated with the selected team.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/teams/{id}/projects/": { + "get": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of projects\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more project records.\n\n## Results\n\nEach project data structure includes the following fields:\n\n* `id`: Database ID for this project. (integer)\n* `type`: Data type for this project. (choice)\n* `url`: URL for this project. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this project was created. 
(datetime)\n* `modified`: Timestamp when this project was last modified. (datetime)\n* `name`: Name of this project. (string)\n* `description`: Optional description of this project. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to check out. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules' latest commits on the defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `scm_revision`: The last revision fetched by a project update (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n* `organization`: The organization used to determine access to this template. (id)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run using this project. (id)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that projects are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_teams_projects_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Project" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of projects associated with the selected team.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/teams/{id}/roles/": { + "get": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records.\n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_teams_roles_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/RoleSerializerWithParentAccess" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of roles associated with the selected team.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records.\n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_teams_roles_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "id": 2 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/RoleSerializerWithParentAccess" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of roles associated with the selected team.", + "tags": [ + "api" + ] + } + }, + "/api/v2/teams/{id}/users/": { + "get": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records.\n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_teams_users_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/User" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of users associated with the selected team.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records.\n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. 
(string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_teams_users_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/User" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/User" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of users associated with the selected team.", + "tags": [ + "api" + ] + } + }, + "/api/v2/tokens/": { + "get": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of access tokens\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more access token records.\n\n## Results\n\nEach access token data structure includes the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. (string)\n* `user`: The user representing the token owner (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string)\n\n\n\n## Sorting\n\nTo specify that access tokens are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_tokens_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/OAuth2Token" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of access tokens.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of access tokens\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more access token records.\n\n## Results\n\nEach access token data structure includes the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. (string)\n* `user`: The user representing the token owner (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. 
(string)\n\n\n\n## Sorting\n\nTo specify that access tokens are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_tokens_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/OAuth2Token" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/OAuth2Token" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of access tokens.", + "tags": [ + "api" + ] + } + }, + "/api/v2/tokens/{id}/": { + "delete": { + "description": "The record contains the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. (string)\n* `user`: The user representing the token owner (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string)\n\n\n\n\n\n# Update an Access Token:\n\nMake a PUT or PATCH request to this resource to update this\naccess token. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `description`: Optional description of this access token. (string, default=`\"\"`)\n\n\n\n\n\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string, default=`\"write\"`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Access Token:\n\nMake a DELETE request to this resource to delete this access token.", + "operationId": "api_tokens_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make a GET request to this resource to retrieve a single access token record.", + "tags": [ + "api" + ] + }, + "get": { + "description": "The record contains the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. 
(string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. (string)\n* `user`: The user representing the token owner (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string)\n\n\n\n\n\n# Update an Access Token:\n\nMake a PUT or PATCH request to this resource to update this\naccess token. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `description`: Optional description of this access token. (string, default=`\"\"`)\n\n\n\n\n\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string, default=`\"write\"`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Access Token:\n\nMake a DELETE request to this resource to delete this access token.", + "operationId": "api_tokens_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/OAuth2TokenDetail" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single access token", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. (string)\n* `user`: The user representing the token owner (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string)\n\n\n\n\n\n# Update an Access Token:\n\nMake a PUT or PATCH request to this resource to update this\naccess token. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `description`: Optional description of this access token. (string, default=`\"\"`)\n\n\n\n\n\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. 
(string, default=`\"write\"`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Access Token:\n\nMake a DELETE request to this resource to delete this access token.", + "operationId": "api_tokens_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/OAuth2TokenDetail" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/OAuth2TokenDetail" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single access token", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. (string)\n* `user`: The user representing the token owner (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string)\n\n\n\n\n\n# Update an Access Token:\n\nMake a PUT or PATCH request to this resource to update this\naccess token. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `description`: Optional description of this access token. (string, default=`\"\"`)\n\n\n\n\n\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string, default=`\"write\"`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete an Access Token:\n\nMake a DELETE request to this resource to delete this access token.", + "operationId": "api_tokens_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/OAuth2TokenDetail" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/OAuth2TokenDetail" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single access token", + "tags": [ + "api" + ] + } + }, + "/api/v2/tokens/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\naccess token.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. 
(choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_tokens_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/unified_job_templates/": { + "get": { + "description": "unified job templates.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of unified job templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more unified job template records. \n\n## Results\n\nEach unified job template data structure includes the following fields:\n\n* `id`: Database ID for this unified job template. (integer)\n* `type`: Data type for this unified job template. (choice)\n* `url`: URL for this unified job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this unified job template was created. (datetime)\n* `modified`: Timestamp when this unified job template was last modified. (datetime)\n* `name`: Name of this unified job template. (string)\n* `description`: Optional description of this unified job template. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `execution_environment`: The container image to be used for execution. 
(id)\n\n\n\n## Sorting\n\nTo specify that unified job templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_unified_job_templates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 0, + "next": null, + "previous": null, + "results": [] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/UnifiedJobTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/unified_jobs/": { + "get": { + "description": "unified jobs.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of unified jobs\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more unified job records. \n\n## Results\n\nEach unified job data structure includes the following fields:\n\n* `id`: Database ID for this unified job. (integer)\n* `type`: Data type for this unified job. (choice)\n* `url`: URL for this unified job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this unified job was created. (datetime)\n* `modified`: Timestamp when this unified job was last modified. (datetime)\n* `name`: Name of this unified job. (string)\n* `description`: Optional description of this unified job. 
(string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `execution_node`: The node the job executed on. (string)\n* `controller_node`: The instance that managed the execution environment. (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n\n\n\n## Sorting\n\nTo specify that unified jobs are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_unified_jobs_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/UnifiedJobList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/users/": { + "get": { + "description": "users.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n 
\"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 0, + "next": null, + "previous": null, + "results": [] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/User" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "users.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. 
(string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "email": "a@a.com", + "first_name": "a", + "is_superuser": false, + "last_name": "a", + "password": "r$TyKiOCb#ED", + "username": "affable" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "auth": [], + "created": "2018-02-01T08:00:00.000000Z", + "email": "a@a.com", + "external_account": null, + "first_name": "a", + "id": 3, + "is_superuser": false, + "is_system_auditor": false, + "last_login": null, + "last_name": "a", + "ldap_dn": "", + "modified": null, + "password": "$encrypted$", + "related": { + "access_list": "/api/v2/users/3/access_list/", + "activity_stream": "/api/v2/users/3/activity_stream/", + "admin_of_organizations": "/api/v2/users/3/admin_of_organizations/", + "authorized_tokens": "/api/v2/users/3/authorized_tokens/", + "credentials": "/api/v2/users/3/credentials/", + "organizations": "/api/v2/users/3/organizations/", + "personal_tokens": "/api/v2/users/3/personal_tokens/", + "projects": "/api/v2/users/3/projects/", + "roles": "/api/v2/users/3/roles/", + "teams": "/api/v2/users/3/teams/", + "tokens": "/api/v2/users/3/tokens/" + }, + "summary_fields": { + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "type": "user", + "url": "/api/v2/users/3/", + "username": "affable" + } + }, + "schema": { + "$ref": "#/definitions/User" + } + }, + "400": { + "examples": { + "application/json": { + "username": [ + "A user with that username already exists." + ] + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/users/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. 
Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n\n\n# Update a User:\n\nMake a PUT or PATCH request to this resource to update this\nuser. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string, required)\n* `first_name`: (string, default=`\"\"`)\n* `last_name`: (string, default=`\"\"`)\n* `email`: (string, default=`\"\"`)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean, default=`False`)\n* `is_system_auditor`: (boolean, default=`False`)\n* `password`: Field used to change the password. (string, default=`\"\"`)\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a User:\n\nMake a DELETE request to this resource to delete this user.", + "operationId": "api_users_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single user", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n\n\n# Update a User:\n\nMake a PUT or PATCH request to this resource to update this\nuser. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string, required)\n* `first_name`: (string, default=`\"\"`)\n* `last_name`: (string, default=`\"\"`)\n* `email`: (string, default=`\"\"`)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean, default=`False`)\n* `is_system_auditor`: (boolean, default=`False`)\n* `password`: Field used to change the password. 
(string, default=`\"\"`)\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a User:\n\nMake a DELETE request to this resource to delete this user.", + "operationId": "api_users_read", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "auth": [], + "created": "2018-02-01T08:00:00.000000Z", + "email": "", + "external_account": null, + "first_name": "", + "id": 1, + "is_superuser": true, + "is_system_auditor": false, + "last_login": null, + "last_name": "", + "ldap_dn": "", + "modified": null, + "password": "$encrypted$", + "related": { + "access_list": "/api/v2/users/1/access_list/", + "activity_stream": "/api/v2/users/1/activity_stream/", + "admin_of_organizations": "/api/v2/users/1/admin_of_organizations/", + "authorized_tokens": "/api/v2/users/1/authorized_tokens/", + "credentials": "/api/v2/users/1/credentials/", + "organizations": "/api/v2/users/1/organizations/", + "personal_tokens": "/api/v2/users/1/personal_tokens/", + "projects": "/api/v2/users/1/projects/", + "roles": "/api/v2/users/1/roles/", + "teams": "/api/v2/users/1/teams/", + "tokens": "/api/v2/users/1/tokens/" + }, + "summary_fields": { + "user_capabilities": { + "delete": false, + "edit": true + } + }, + "type": "user", + "url": "/api/v2/users/1/", + "username": "admin" + } + }, + "schema": { + "$ref": "#/definitions/User" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single user", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n\n\n# Update a User:\n\nMake a PUT or PATCH request to this resource to update this\nuser. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string, required)\n* `first_name`: (string, default=`\"\"`)\n* `last_name`: (string, default=`\"\"`)\n* `email`: (string, default=`\"\"`)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean, default=`False`)\n* `is_system_auditor`: (boolean, default=`False`)\n* `password`: Field used to change the password. 
(string, default=`\"\"`)\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a User:\n\nMake a DELETE request to this resource to delete this user.", + "operationId": "api_users_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "last_login": "2018-02-01T08:00:00.000000Z" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "auth": [], + "created": "2018-02-01T08:00:00.000000Z", + "email": "", + "external_account": null, + "first_name": "", + "id": 1, + "is_superuser": true, + "is_system_auditor": false, + "last_login": null, + "last_name": "", + "ldap_dn": "", + "modified": null, + "password": "$encrypted$", + "related": { + "access_list": "/api/v2/users/1/access_list/", + "activity_stream": "/api/v2/users/1/activity_stream/", + "admin_of_organizations": "/api/v2/users/1/admin_of_organizations/", + "authorized_tokens": "/api/v2/users/1/authorized_tokens/", + "credentials": "/api/v2/users/1/credentials/", + "organizations": "/api/v2/users/1/organizations/", + "personal_tokens": "/api/v2/users/1/personal_tokens/", + "projects": "/api/v2/users/1/projects/", + "roles": "/api/v2/users/1/roles/", + "teams": "/api/v2/users/1/teams/", + "tokens": "/api/v2/users/1/tokens/" + }, + "summary_fields": { + "user_capabilities": { + "delete": false, + "edit": true + } + }, + "type": "user", + "url": "/api/v2/users/1/", + "username": "admin" + } + }, + "schema": { + "$ref": "#/definitions/User" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single user", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n\n\n# Update a User:\n\nMake a PUT or PATCH request to this resource to update this\nuser. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string, required)\n* `first_name`: (string, default=`\"\"`)\n* `last_name`: (string, default=`\"\"`)\n* `email`: (string, default=`\"\"`)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean, default=`False`)\n* `is_system_auditor`: (boolean, default=`False`)\n* `password`: Field used to change the password. 
(string, default=`\"\"`)\n\n\n\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a User:\n\nMake a DELETE request to this resource to delete this user.", + "operationId": "api_users_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/User" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/User" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single user", + "tags": [ + "api" + ] + } + }, + "/api/v2/users/{id}/access_list/": { + "get": { + "description": "users.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. (string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_access_list_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ResourceAccessListElement" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/users/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\nuser.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. 
(string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/users/{id}/admin_of_organizations/": { + "get": { + "description": "organizations of which the selected\nuser is an admin.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of organizations\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more organization records. \n\n## Results\n\nEach organization data structure includes the following fields:\n\n* `id`: Database ID for this organization. (integer)\n* `type`: Data type for this organization. (choice)\n* `url`: URL for this organization. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this organization was created. (datetime)\n* `modified`: Timestamp when this organization was last modified. (datetime)\n* `name`: Name of this organization. (string)\n* `description`: Optional description of this organization. (string)\n* `max_hosts`: Maximum number of hosts allowed to be managed by this organization. (integer)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run by this organization. (id)\n\n\n\n## Sorting\n\nTo specify that organizations are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_admin_of_organizations_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Organization" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/users/{id}/applications/": { + "get": { + "description": "applications.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of applications\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more application records. \n\n## Results\n\nEach application data structure includes the following fields:\n\n* `id`: Database ID for this application. (integer)\n* `type`: Data type for this application. (choice)\n* `url`: URL for this application. 
(string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this application was created. (datetime)\n* `modified`: Timestamp when this application was last modified. (datetime)\n* `name`: Name of this application. (string)\n* `description`: Optional description of this application. (string)\n* `client_id`: (string)\n* `client_secret`: Used for more stringent verification of access to an application when creating a token. (string)\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string)\n* `authorization_grant_type`: The Grant type the user must use for acquire tokens for this application. (choice)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean)\n* `organization`: Organization containing this application. (id)\n\n\n\n## Sorting\n\nTo specify that applications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_applications_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/OAuth2Application" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "applications.\n\nThe resulting data structure contains:\n\n 
{\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of applications\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more application records. \n\n## Results\n\nEach application data structure includes the following fields:\n\n* `id`: Database ID for this application. (integer)\n* `type`: Data type for this application. (choice)\n* `url`: URL for this application. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this application was created. (datetime)\n* `modified`: Timestamp when this application was last modified. (datetime)\n* `name`: Name of this application. (string)\n* `description`: Optional description of this application. (string)\n* `client_id`: (string)\n* `client_secret`: Used for more stringent verification of access to an application when creating a token. (string)\n* `client_type`: Set to Public or Confidential depending on how secure the client device is. (choice)\n - `confidential`: Confidential\n - `public`: Public\n* `redirect_uris`: Allowed URIs list, space separated (string)\n* `authorization_grant_type`: The Grant type the user must use for acquire tokens for this application. (choice)\n - `authorization-code`: Authorization code\n - `password`: Resource owner password-based\n* `skip_authorization`: Set True to skip authorization step for completely trusted applications. (boolean)\n* `organization`: Organization containing this application. (id)\n\n\n\n## Sorting\n\nTo specify that applications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_applications_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/OAuth2Application" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/OAuth2Application" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/users/{id}/authorized_tokens/": { + "get": { + "description": "access tokens associated with the selected\nuser.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of access tokens\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more access token records. \n\n## Results\n\nEach access token data structure includes the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. (string)\n* `user`: The user representing the token owner (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string)\n\n\n\n## Sorting\n\nTo specify that access tokens are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
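Creating an application is a POST to the same resource with an OAuth2Application body; per the field list above, `client_type` must be `confidential` or `public` and `authorization_grant_type` must be `authorization-code` or `password`. A hedged sketch; the field values and organization ID are illustrative, not taken from this spec:

    # Create a password-grant OAuth2 application for user 1; values are illustrative.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name": "My CLI", "client_type": "confidential",
           "authorization_grant_type": "password", "organization": 1}' \
      "https://controller.example.com/api/v2/users/1/applications/"

A 201 response returns the new application record; per the field notes above, `client_secret` is what provides the more stringent verification when tokens are later created against a confidential client.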
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_authorized_tokens_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/UserAuthorizedToken" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "access tokens associated with the selected\nuser.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of access tokens\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more access token records. \n\n## Results\n\nEach access token data structure includes the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. (string)\n* `user`: The user representing the token owner (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. 
(string)\n\n\n\n## Sorting\n\nTo specify that access tokens are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_authorized_tokens_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/UserAuthorizedToken" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/UserAuthorizedToken" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/users/{id}/credentials/": { + "get": { + "description": "credentials associated with the selected\nuser.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "cloud": false, + "created": "2018-02-01T08:00:00.000000Z", + "credential_type": 1, + "description": "", + "id": 1, + "inputs": { + "username": "someusername" + }, + "kind": "ssh", + "kubernetes": false, + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "Some name", + "related": { + "access_list": "/api/v2/credentials/1/access_list/", + "activity_stream": "/api/v2/credentials/1/activity_stream/", + "copy": "/api/v2/credentials/1/copy/", + "credential_type": "/api/v2/credential_types/1/", + "input_sources": "/api/v2/credentials/1/input_sources/", + "object_roles": "/api/v2/credentials/1/object_roles/", + "owner_teams": "/api/v2/credentials/1/owner_teams/", + "owner_users": "/api/v2/credentials/1/owner_users/", + "user": "/api/v2/users/1/" + }, + "summary_fields": { + "credential_type": { + "description": "", + "id": 1, + "name": "Machine" + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the credential", + "id": 2, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the credential", + "id": 4, + "name": "Read" + }, + "use_role": { + "description": "Can use the credential in a job template", + "id": 3, + "name": "Use" + } + }, + "owners": [ + { + "description": " ", + "id": 1, + "name": "alice", + "type": "user", + "url": "/api/v2/users/1/" + } + ], + "user_capabilities": { + "copy": false, + "delete": true, + "edit": true, + "use": true + } + }, + "type": "credential", + "url": "/api/v2/credentials/1/" + } + ] + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/UserCredentialSerializerCreate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credentials associated with the selected\nuser.\n\nThe resulting data structure contains:\n\n {\n \"count\": 
99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_credentials_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "credential_type": 1, + "inputs": { + "username": "someusername" + }, + "name": "Some name", + "user": 1 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "cloud": false, + "created": "2018-02-01T08:00:00.000000Z", + "credential_type": 1, + "description": "", + "id": 1, + "inputs": { + "username": "someusername" + }, + "kind": "ssh", + "kubernetes": false, + "managed": false, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "Some name", + "related": { + "access_list": "/api/v2/credentials/1/access_list/", + "activity_stream": "/api/v2/credentials/1/activity_stream/", + "copy": "/api/v2/credentials/1/copy/", + "credential_type": "/api/v2/credential_types/1/", + "input_sources": "/api/v2/credentials/1/input_sources/", + "object_roles": "/api/v2/credentials/1/object_roles/", + "owner_teams": "/api/v2/credentials/1/owner_teams/", + "owner_users": "/api/v2/credentials/1/owner_users/", + "user": "/api/v2/users/1/" + }, + "summary_fields": { + "credential_type": { + "description": "", + "id": 1, + "name": "Machine" + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the credential", + "id": 2, + "name": "Admin" + }, + "read_role": { + "description": "May view settings for the credential", + "id": 4, + "name": "Read" + }, + "use_role": { + "description": "Can use the credential in a job template", + "id": 3, + "name": "Use" + } + }, + "owners": [ + { + "description": " ", + "id": 1, + "name": "alice", + "type": "user", + "url": "/api/v2/users/1/" + } + ], + "user_capabilities": { + "copy": false, + "delete": true, + "edit": true, + "use": true + } + }, + "type": "credential", + "url": "/api/v2/credentials/1/" + } + }, + "schema": { + "$ref": "#/definitions/UserCredentialSerializerCreate" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/users/{id}/organizations/": { + "get": { + "description": "organizations associated with the selected\nuser.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of organizations\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more organization records. \n\n## Results\n\nEach organization data structure includes the following fields:\n\n* `id`: Database ID for this organization. (integer)\n* `type`: Data type for this organization. (choice)\n* `url`: URL for this organization. (string)\n* `related`: Data structure with URLs of related resources. 
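The 201 example above already shows the request body for creating a credential through this endpoint; expressed as a curl call with the same illustrative values (hostname and $TOKEN remain placeholders):

    # Create a machine (ssh) credential owned by user 1, mirroring the spec's example body.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name": "Some name", "credential_type": 1,
           "inputs": {"username": "someusername"}, "user": 1}' \
      "https://controller.example.com/api/v2/users/1/credentials/"

Per the documented 403 response, the call is rejected with "You do not have permission to perform this action." when the caller lacks the necessary access.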
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this organization was created. (datetime)\n* `modified`: Timestamp when this organization was last modified. (datetime)\n* `name`: Name of this organization. (string)\n* `description`: Optional description of this organization. (string)\n* `max_hosts`: Maximum number of hosts allowed to be managed by this organization. (integer)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run by this organization. (id)\n\n\n\n## Sorting\n\nTo specify that organizations are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_organizations_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Organization" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/users/{id}/personal_tokens/": { + "get": { + "description": "access tokens associated with the selected\nuser.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of access tokens\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more access token records. 
\n\n## Results\n\nEach access token data structure includes the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. (string)\n* `user`: The user representing the token owner (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string)\n\n\n\n## Sorting\n\nTo specify that access tokens are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_personal_tokens_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/UserPersonalToken" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "access tokens associated with the selected\nuser.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of access tokens\nfound for the given query. 
The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more access token records. \n\n## Results\n\nEach access token data structure includes the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. (string)\n* `user`: The user representing the token owner (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string)\n\n\n\n## Sorting\n\nTo specify that access tokens are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_personal_tokens_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/UserPersonalToken" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/UserPersonalToken" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/users/{id}/projects/": { + "get": { + "description": "projects associated with the selected\nuser.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of projects\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more project records. \n\n## Results\n\nEach project data structure includes the following fields:\n\n* `id`: Database ID for this project. (integer)\n* `type`: Data type for this project. (choice)\n* `url`: URL for this project. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
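Personal access tokens are created by POSTing a UserPersonalToken body to this resource; per the field list above, `scope` is a space-separated string drawn from ['read', 'write'], and the `token` field of the response record carries the token value. A minimal sketch; the description and user ID are placeholders:

    # Create a write-scoped personal access token for user 1.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"description": "ci token", "scope": "write"}' \
      "https://controller.example.com/api/v2/users/1/personal_tokens/"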
(object)\n* `created`: Timestamp when this project was created. (datetime)\n* `modified`: Timestamp when this project was last modified. (datetime)\n* `name`: Name of this project. (string)\n* `description`: Optional description of this project. (string)\n* `local_path`: Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project. (string)\n* `scm_type`: Specifies the source control system used to store the project. (choice)\n - `\"\"`: Manual\n - `git`: Git\n - `svn`: Subversion\n - `insights`: Red Hat Insights\n - `archive`: Remote Archive\n* `scm_url`: The location where the project is stored. (string)\n* `scm_branch`: Specific branch, tag or commit to checkout. (string)\n* `scm_refspec`: For git projects, an additional refspec to fetch. (string)\n* `scm_clean`: Discard any local changes before syncing the project. (boolean)\n* `scm_track_submodules`: Track submodules latest commits on defined branch. (boolean)\n* `scm_delete_on_update`: Delete the project before syncing. (boolean)\n* `credential`: (id)\n* `timeout`: The amount of time (in seconds) to run before the task is canceled. (integer)\n* `scm_revision`: The last revision fetched by a project update (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n* `organization`: The organization used to determine access to this template. (id)\n* `scm_update_on_launch`: Update the project when a job is launched that uses the project. (boolean)\n* `scm_update_cache_timeout`: The number of seconds after the last project update ran that a new project update will be launched as a job dependency. (integer)\n* `allow_override`: Allow changing the SCM branch or revision in a job template that uses this project. (boolean)\n* `custom_virtualenv`: Local absolute file path containing a custom Python virtualenv to use (string)\n* `default_environment`: The default execution environment for jobs run using this project. (id)\n* `signature_validation_credential`: An optional credential used for validating files in the project against unexpected changes. (id)\n* `last_update_failed`: (boolean)\n* `last_updated`: (datetime)\n\n\n\n## Sorting\n\nTo specify that projects are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_projects_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Project" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/users/{id}/roles/": { + "get": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records. \n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_roles_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/RoleSerializerWithParentAccess" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of roles associated with the selected user.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records. \n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. (field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_roles_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "id": 2 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/RoleSerializerWithParentAccess" + } + }, + "400": { + "examples": { + "application/json": { + "msg": "You cannot grant private credential access to another user" + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of roles associated with the selected user.", + "tags": [ + "api" + ] + } + }, + "/api/v2/users/{id}/teams/": { + "get": { + "description": "teams associated with the selected\nuser.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of teams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more team records. \n\n## Results\n\nEach team data structure includes the following fields:\n\n* `id`: Database ID for this team. (integer)\n* `type`: Data type for this team. (choice)\n* `url`: URL for this team. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this team was created. (datetime)\n* `modified`: Timestamp when this team was last modified. (datetime)\n* `name`: Name of this team. (string)\n* `description`: Optional description of this team. (string)\n* `organization`: (id)\n\n\n\n## Sorting\n\nTo specify that teams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
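The example body above ({"id": 2}) is the whole request: POSTing a role ID to this resource associates that role with the user, and the documented 400 response shows one case the API refuses ("You cannot grant private credential access to another user"). As curl calls, with placeholder IDs:

    # Grant role 2 to user 1.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"id": 2}' \
      "https://controller.example.com/api/v2/users/1/roles/"

    # Revoking is assumed to follow the usual AWX association convention
    # ({"id": N, "disassociate": true}); that form is not shown in this excerpt.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"id": 2, "disassociate": true}' \
      "https://controller.example.com/api/v2/users/1/roles/"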
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_teams_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Team" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/users/{id}/tokens/": { + "get": { + "description": "access tokens associated with the selected\nuser.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of access tokens\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more access token records. \n\n## Results\n\nEach access token data structure includes the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. (string)\n* `user`: The user representing the token owner (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. 
(string)\n\n\n\n## Sorting\n\nTo specify that access tokens are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_tokens_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/OAuth2Token" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "access tokens associated with the selected\nuser.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of access tokens\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more access token records. \n\n## Results\n\nEach access token data structure includes the following fields:\n\n* `id`: Database ID for this access token. (integer)\n* `type`: Data type for this access token. (choice)\n* `url`: URL for this access token. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this access token was created. (datetime)\n* `modified`: Timestamp when this access token was last modified. (datetime)\n* `description`: Optional description of this access token. 
(string)\n* `user`: The user representing the token owner (id)\n* `token`: (string)\n* `refresh_token`: (field)\n* `application`: (id)\n* `expires`: (datetime)\n* `scope`: Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']. (string)\n\n\n\n## Sorting\n\nTo specify that access tokens are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_users_tokens_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/OAuth2Token" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/OAuth2Token" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_approval_templates/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow approval template. (integer)\n* `type`: Data type for this workflow approval template. (choice)\n* `url`: URL for this workflow approval template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow approval template was created. (datetime)\n* `modified`: Timestamp when this workflow approval template was last modified. (datetime)\n* `name`: Name of this workflow approval template. (string)\n* `description`: Optional description of this workflow approval template. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `execution_environment`: The container image to be used for execution. (id)\n* `timeout`: The amount of time (in seconds) before the approval node expires and fails. (integer)\n\n\n\n\n\n# Update a Workflow Approval Template:\n\nMake a PUT or PATCH request to this resource to update this\nworkflow approval template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this workflow approval template. (string, required)\n* `description`: Optional description of this workflow approval template. 
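Unlike `personal_tokens/`, a token created through this resource can be tied to an OAuth2 application via the documented `application` (id) field. A sketch under that assumption; the application ID and description are placeholders:

    # Create a read-scoped, application-linked access token for user 1.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"application": 3, "scope": "read", "description": "dashboard token"}' \
      "https://controller.example.com/api/v2/users/1/tokens/"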
(string, default=`\"\"`)\n\n\n\n\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `timeout`: The amount of time (in seconds) before the approval node expires and fails. (integer, default=`0`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Workflow Approval Template:\n\nMake a DELETE request to this resource to delete this workflow approval template.", + "operationId": "api_workflow_approval_templates_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow approval template", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow approval template. (integer)\n* `type`: Data type for this workflow approval template. (choice)\n* `url`: URL for this workflow approval template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow approval template was created. (datetime)\n* `modified`: Timestamp when this workflow approval template was last modified. (datetime)\n* `name`: Name of this workflow approval template. (string)\n* `description`: Optional description of this workflow approval template. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `execution_environment`: The container image to be used for execution. (id)\n* `timeout`: The amount of time (in seconds) before the approval node expires and fails. (integer)\n\n\n\n\n\n# Update a Workflow Approval Template:\n\nMake a PUT or PATCH request to this resource to update this\nworkflow approval template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this workflow approval template. (string, required)\n* `description`: Optional description of this workflow approval template. (string, default=`\"\"`)\n\n\n\n\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `timeout`: The amount of time (in seconds) before the approval node expires and fails. 
(integer, default=`0`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Workflow Approval Template:\n\nMake a DELETE request to this resource to delete this workflow approval template.", + "operationId": "api_workflow_approval_templates_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowApprovalTemplate" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow approval template", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow approval template. (integer)\n* `type`: Data type for this workflow approval template. (choice)\n* `url`: URL for this workflow approval template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow approval template was created. (datetime)\n* `modified`: Timestamp when this workflow approval template was last modified. (datetime)\n* `name`: Name of this workflow approval template. (string)\n* `description`: Optional description of this workflow approval template. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `execution_environment`: The container image to be used for execution. (id)\n* `timeout`: The amount of time (in seconds) before the approval node expires and fails. (integer)\n\n\n\n\n\n# Update a Workflow Approval Template:\n\nMake a PUT or PATCH request to this resource to update this\nworkflow approval template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this workflow approval template. (string, required)\n* `description`: Optional description of this workflow approval template. (string, default=`\"\"`)\n\n\n\n\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `timeout`: The amount of time (in seconds) before the approval node expires and fails. 
(integer, default=`0`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Workflow Approval Template:\n\nMake a DELETE request to this resource to delete this workflow approval template.", + "operationId": "api_workflow_approval_templates_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowApprovalTemplate" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowApprovalTemplate" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow approval template", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow approval template. (integer)\n* `type`: Data type for this workflow approval template. (choice)\n* `url`: URL for this workflow approval template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow approval template was created. (datetime)\n* `modified`: Timestamp when this workflow approval template was last modified. (datetime)\n* `name`: Name of this workflow approval template. (string)\n* `description`: Optional description of this workflow approval template. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `execution_environment`: The container image to be used for execution. (id)\n* `timeout`: The amount of time (in seconds) before the approval node expires and fails. (integer)\n\n\n\n\n\n# Update a Workflow Approval Template:\n\nMake a PUT or PATCH request to this resource to update this\nworkflow approval template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this workflow approval template. (string, required)\n* `description`: Optional description of this workflow approval template. (string, default=`\"\"`)\n\n\n\n\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `timeout`: The amount of time (in seconds) before the approval node expires and fails. 
(integer, default=`0`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Workflow Approval Template:\n\nMake a DELETE request to this resource to delete this workflow approval template.", + "operationId": "api_workflow_approval_templates_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowApprovalTemplate" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowApprovalTemplate" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow approval template", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_approval_templates/{id}/approvals/": { + "get": { + "description": "workflow approvals associated with the selected\nworkflow approval template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow approvals\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow approval records. \n\n## Results\n\nEach workflow approval data structure includes the following fields:\n\n* `id`: Database ID for this workflow approval. (integer)\n* `type`: Data type for this workflow approval. (choice)\n* `url`: URL for this workflow approval. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow approval was created. (datetime)\n* `modified`: Timestamp when this workflow approval was last modified. (datetime)\n* `name`: Name of this workflow approval. (string)\n* `description`: Optional description of this workflow approval. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. 
(string)\n* `can_approve_or_deny`: (field)\n* `approval_expiration`: (field)\n* `timed_out`: (boolean)\n\n\n\n## Sorting\n\nTo specify that workflow approvals are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_approval_templates_approvals_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowApprovalList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_approvals/": { + "get": { + "description": "workflow approvals.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow approvals\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow approval records. \n\n## Results\n\nEach workflow approval data structure includes the following fields:\n\n* `id`: Database ID for this workflow approval. (integer)\n* `type`: Data type for this workflow approval. (choice)\n* `url`: URL for this workflow approval. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow approval was created. (datetime)\n* `modified`: Timestamp when this workflow approval was last modified. (datetime)\n* `name`: Name of this workflow approval. 
(string)\n* `description`: Optional description of this workflow approval. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `can_approve_or_deny`: (field)\n* `approval_expiration`: (field)\n* `timed_out`: (boolean)\n\n\n\n## Sorting\n\nTo specify that workflow approvals are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_approvals_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowApprovalList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_approvals/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow approval. (integer)\n* `type`: Data type for this workflow approval. (choice)\n* `url`: URL for this workflow approval. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow approval was created. (datetime)\n* `modified`: Timestamp when this workflow approval was last modified. (datetime)\n* `name`: Name of this workflow approval. (string)\n* `description`: Optional description of this workflow approval. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. 
(decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `result_traceback`: (string)\n* `event_processing_finished`: Indicates whether all of the events generated by this unified job have been saved to the database. (boolean)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `can_approve_or_deny`: (field)\n* `approval_expiration`: (field)\n* `timed_out`: (boolean)\n\n\n\n\n\n# Delete a Workflow Approval:\n\nMake a DELETE request to this resource to delete this workflow approval.", + "operationId": "api_workflow_approvals_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow approval", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow approval. (integer)\n* `type`: Data type for this workflow approval. (choice)\n* `url`: URL for this workflow approval. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow approval was created. (datetime)\n* `modified`: Timestamp when this workflow approval was last modified. (datetime)\n* `name`: Name of this workflow approval. (string)\n* `description`: Optional description of this workflow approval. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `execution_environment`: The container image to be used for execution. (id)\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `result_traceback`: (string)\n* `event_processing_finished`: Indicates whether all of the events generated by this unified job have been saved to the database. (boolean)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. 
(string)\n* `can_approve_or_deny`: (field)\n* `approval_expiration`: (field)\n* `timed_out`: (boolean)\n\n\n\n\n\n# Delete a Workflow Approval:\n\nMake a DELETE request to this resource to delete this workflow approval.", + "operationId": "api_workflow_approvals_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowApproval" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow approval", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_approvals/{id}/approve/": { + "get": { + "description": "record containing the following fields:", + "operationId": "api_workflow_approvals_approve_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowApprovalView" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow approval", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "record containing the following fields:", + "operationId": "api_workflow_approvals_approve_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowApprovalView" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowApprovalView" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow approval", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_approvals/{id}/deny/": { + "get": { + "description": "record containing the following fields:", + "operationId": "api_workflow_approvals_deny_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowApprovalView" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow approval", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "record containing the following fields:", + "operationId": "api_workflow_approvals_deny_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowApprovalView" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowApprovalView" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow approval", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_nodes/": { + "get": { + "description": "workflow job nodes.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job nodes\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job node records. 
\n\n## Results\n\nEach workflow job node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job node. (integer)\n* `type`: Data type for this workflow job node. (choice)\n* `url`: URL for this workflow job node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job node was created. (datetime)\n* `modified`: Timestamp when this workflow job node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `job`: (id)\n* `workflow_job`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `do_not_run`: Indicates that a job will not be created when True. Workflow runtime semantics will mark this True if the node is in a path that will decidedly not be run. A value of False means the node may not run. (boolean)\n* `identifier`: An identifier corresponding to the workflow job template node that this node was created from. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_nodes_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobNodeList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_job_nodes/{id}/": { + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow job node. (integer)\n* `type`: Data type for this workflow job node. (choice)\n* `url`: URL for this workflow job node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job node was created. (datetime)\n* `modified`: Timestamp when this workflow job node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `job`: (id)\n* `workflow_job`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `do_not_run`: Indicates that a job will not be created when True. Workflow runtime semantics will mark this True if the node is in a path that will decidedly not be run. A value of False means the node may not run. 
(boolean)\n* `identifier`: An identifier corresponding to the workflow job template node that this node was created from. (string)", + "operationId": "api_workflow_job_nodes_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobNodeDetail" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job node", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_job_nodes/{id}/always_nodes/": { + "get": { + "description": "workflow job nodes associated with the selected\nworkflow job node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job nodes\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job node records. \n\n## Results\n\nEach workflow job node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job node. (integer)\n* `type`: Data type for this workflow job node. (choice)\n* `url`: URL for this workflow job node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job node was created. (datetime)\n* `modified`: Timestamp when this workflow job node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `job`: (id)\n* `workflow_job`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `do_not_run`: Indicates that a job will not be created when True. Workflow runtime semantics will mark this True if the node is in a path that will decidedly not be run. A value of False means the node may not run. (boolean)\n* `identifier`: An identifier corresponding to the workflow job template node that this node was created from. 
(string)\n\n\n\n## Sorting\n\nTo specify that workflow job nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_nodes_always_nodes_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobNodeList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_job_nodes/{id}/credentials/": { + "get": { + "description": "credentials associated with the selected\nworkflow job node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. 
(string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_nodes_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Credential" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_job_nodes/{id}/failure_nodes/": { + "get": { + "description": "workflow job nodes associated with the selected\nworkflow job node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job nodes\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job node records. \n\n## Results\n\nEach workflow job node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job node. (integer)\n* `type`: Data type for this workflow job node. (choice)\n* `url`: URL for this workflow job node. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job node was created. (datetime)\n* `modified`: Timestamp when this workflow job node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `job`: (id)\n* `workflow_job`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `do_not_run`: Indicates that a job will not be created when True. Workflow runtime semantics will mark this True if the node is in a path that will decidedly not be run. A value of False means the node may not run. (boolean)\n* `identifier`: An identifier corresponding to the workflow job template node that this node was created from. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_nodes_failure_nodes_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobNodeList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_job_nodes/{id}/instance_groups/": { + "get": { + "description": "instance groups associated with the selected\nworkflow job node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. 
(boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_nodes_instance_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InstanceGroup" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "instance groups associated with the selected\nworkflow job node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. 
(integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_nodes_instance_groups_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_nodes/{id}/labels/": { + "get": { + "description": "labels associated with the selected\nworkflow job node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. 
The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_nodes_labels_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Label" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_job_nodes/{id}/success_nodes/": { + "get": { + "description": "workflow job nodes associated with the selected\nworkflow job node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job nodes\nfound for the given query. 
The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job node records. \n\n## Results\n\nEach workflow job node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job node. (integer)\n* `type`: Data type for this workflow job node. (choice)\n* `url`: URL for this workflow job node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job node was created. (datetime)\n* `modified`: Timestamp when this workflow job node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `job`: (id)\n* `workflow_job`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `do_not_run`: Indicates that a job will not be created when True. Workflow runtime semantics will mark this True if the node is in a path that will decidedly not be run. A value of False means the node may not run. (boolean)\n* `identifier`: An identifier corresponding to the workflow job template node that this node was created from. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_nodes_success_nodes_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobNodeList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_job_template_nodes/": { + "get": { + "description": "workflow job template nodes.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job template nodes\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template node records. \n\n## Results\n\nEach workflow job template node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. 
(datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job template nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobTemplateNode" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "workflow job template nodes.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the 
total number of workflow job template nodes\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template node records. \n\n## Results\n\nEach workflow job template node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job template nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
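As a sketch of the corresponding create call, reusing the placeholder `$HOST` and `$TOKEN` from the earlier example (the IDs and identifier are illustrative; per the update section below, `workflow_job_template` is the one required field):

    # Create a node in workflow job template 3 that runs unified job template 7.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"workflow_job_template": 3, "unified_job_template": 7, "identifier": "provision-step"}' \
      "$HOST/api/v2/workflow_job_template_nodes/"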
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNode" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNode" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_template_nodes/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n\n\n# Update a Workflow Job Template Node:\n\nMake a PUT or PATCH request to this resource to update this\nworkflow job template node. 
The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `extra_data`: (json, default=`{}`)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id, default=``)\n* `scm_branch`: (string, default=`\"\"`)\n* `job_type`: (choice)\n - `None`: --------- (default)\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string, default=`\"\"`)\n* `skip_tags`: (string, default=`\"\"`)\n* `limit`: (string, default=`\"\"`)\n* `diff_mode`: (boolean, default=`None`)\n* `verbosity`: (choice)\n - `None`: --------- (default)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `forks`: (integer, default=`None`)\n* `job_slice_count`: (integer, default=`None`)\n* `timeout`: (integer, default=`None`)\n* `workflow_job_template`: (id, required)\n* `unified_job_template`: (id, default=``)\n\n\n\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean, default=`False`)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string, default=`\"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\"`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Workflow Job Template Node:\n\nMake a DELETE request to this resource to delete this workflow job template node.", + "operationId": "api_workflow_job_template_nodes_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job template node", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. 
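A sketch of the update and delete calls described above, again with placeholder values; note that PATCH sends only the changed fields, while PUT must resend every modifiable field:

    # PATCH: change only the fields being modified on node 5.
    curl -s -X PATCH \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"all_parents_must_converge": true, "job_type": "check"}' \
      "$HOST/api/v2/workflow_job_template_nodes/5/"

    # DELETE: remove node 5 entirely (a 204 response indicates success).
    curl -s -X DELETE -H "Authorization: Bearer $TOKEN" \
      "$HOST/api/v2/workflow_job_template_nodes/5/"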
(id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n\n\n# Update a Workflow Job Template Node:\n\nMake a PUT or PATCH request to this resource to update this\nworkflow job template node. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `extra_data`: (json, default=`{}`)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id, default=``)\n* `scm_branch`: (string, default=`\"\"`)\n* `job_type`: (choice)\n - `None`: --------- (default)\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string, default=`\"\"`)\n* `skip_tags`: (string, default=`\"\"`)\n* `limit`: (string, default=`\"\"`)\n* `diff_mode`: (boolean, default=`None`)\n* `verbosity`: (choice)\n - `None`: --------- (default)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `forks`: (integer, default=`None`)\n* `job_slice_count`: (integer, default=`None`)\n* `timeout`: (integer, default=`None`)\n* `workflow_job_template`: (id, required)\n* `unified_job_template`: (id, default=``)\n\n\n\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean, default=`False`)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string, default=`\"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\"`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Workflow Job Template Node:\n\nMake a DELETE request to this resource to delete this workflow job template node.", + "operationId": "api_workflow_job_template_nodes_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNodeDetail" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job template node", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. 
(datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n\n\n# Update a Workflow Job Template Node:\n\nMake a PUT or PATCH request to this resource to update this\nworkflow job template node. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `extra_data`: (json, default=`{}`)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id, default=``)\n* `scm_branch`: (string, default=`\"\"`)\n* `job_type`: (choice)\n - `None`: --------- (default)\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string, default=`\"\"`)\n* `skip_tags`: (string, default=`\"\"`)\n* `limit`: (string, default=`\"\"`)\n* `diff_mode`: (boolean, default=`None`)\n* `verbosity`: (choice)\n - `None`: --------- (default)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `forks`: (integer, default=`None`)\n* `job_slice_count`: (integer, default=`None`)\n* `timeout`: (integer, default=`None`)\n* `workflow_job_template`: (id, required)\n* `unified_job_template`: (id, default=``)\n\n\n\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean, default=`False`)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. 
(string, default=`\"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\"`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Workflow Job Template Node:\n\nMake a DELETE request to this resource to delete this workflow job template node.", + "operationId": "api_workflow_job_template_nodes_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNodeDetail" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNodeDetail" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job template node", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n\n\n# Update a Workflow Job Template Node:\n\nMake a PUT or PATCH request to this resource to update this\nworkflow job template node. 
The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `extra_data`: (json, default=`{}`)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id, default=``)\n* `scm_branch`: (string, default=`\"\"`)\n* `job_type`: (choice)\n - `None`: --------- (default)\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string, default=`\"\"`)\n* `skip_tags`: (string, default=`\"\"`)\n* `limit`: (string, default=`\"\"`)\n* `diff_mode`: (boolean, default=`None`)\n* `verbosity`: (choice)\n - `None`: --------- (default)\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id, default=``)\n* `forks`: (integer, default=`None`)\n* `job_slice_count`: (integer, default=`None`)\n* `timeout`: (integer, default=`None`)\n* `workflow_job_template`: (id, required)\n* `unified_job_template`: (id, default=``)\n\n\n\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean, default=`False`)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string, default=`\"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\"`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Workflow Job Template Node:\n\nMake a DELETE request to this resource to delete this workflow job template node.", + "operationId": "api_workflow_job_template_nodes_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNodeDetail" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNodeDetail" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job template node", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_template_nodes/{id}/always_nodes/": { + "get": { + "description": "workflow job template nodes associated with the selected\nworkflow job template node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job template nodes\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template node records. \n\n## Results\n\nEach workflow job template node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. 
(datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job template nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_always_nodes_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobTemplateNode" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "workflow job template nodes associated with the selected\nworkflow job template node.\n\nThe resulting data 
structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job template nodes\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template node records. \n\n## Results\n\nEach workflow job template node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job template nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
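The association request body for this endpoint appears in the `example` stanza below: POSTing `{"associate": true, "id": 2}` links an existing node as an "always" child. A sketch with placeholder IDs; the `disassociate` form is an assumption based on the usual controller API convention and is not shown in this schema:

    # Link node 2 so it always runs after node 1.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"id": 2, "associate": true}' \
      "$HOST/api/v2/workflow_job_template_nodes/1/always_nodes/"

    # Assumed inverse: remove the link again.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"id": 2, "disassociate": true}' \
      "$HOST/api/v2/workflow_job_template_nodes/1/always_nodes/"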
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_always_nodes_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "associate": true, + "id": 2 + } + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNode" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_template_nodes/{id}/create_approval_template/": { + "get": { + "description": "record containing the following fields:\n\n* `timeout`: The amount of time (in seconds) before the approval node expires and fails. (integer)\n* `name`: Name of this workflow approval template. (string)\n* `description`: Optional description of this workflow approval template. (string)", + "operationId": "api_workflow_job_template_nodes_create_approval_template_read", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": {} + }, + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNodeCreateApproval" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job template node", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "record containing the following fields:\n\n* `timeout`: The amount of time (in seconds) before the approval node expires and fails. (integer)\n* `name`: Name of this workflow approval template. (string)\n* `description`: Optional description of this workflow approval template. (string)", + "operationId": "api_workflow_job_template_nodes_create_approval_template_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "description": "An approval", + "name": "URL Test", + "timeout": 0 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "An approval", + "execution_environment": null, + "id": 2, + "last_job_failed": false, + "last_job_run": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "URL Test", + "next_job_run": null, + "related": { + "jobs": "/api/v2/workflow_approval_templates/2/approvals/" + }, + "status": "never updated", + "summary_fields": { + "workflow_job_template": { + "description": "", + "id": 1, + "name": "" + } + }, + "timeout": 0, + "type": "workflow_approval_template", + "url": "/api/v2/workflow_approval_templates/2/" + } + }, + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNodeCreateApproval" + } + }, + "400": { + "examples": { + "application/json": { + "name": [ + "This field may not be blank." 
+ ] + } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job template node", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_template_nodes/{id}/credentials/": { + "get": { + "description": "credentials associated with the selected\nworkflow job template node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. (json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
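Returning to the `create_approval_template` endpoint closed just above: a sketch that POSTs the schema's own example body to attach a workflow approval template to a node (the node ID is a placeholder):

    # Attach an approval template to node 1, using the example body from the schema.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name": "URL Test", "description": "An approval", "timeout": 0}' \
      "$HOST/api/v2/workflow_job_template_nodes/1/create_approval_template/"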
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_credentials_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Credential" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "credentials associated with the selected\nworkflow job template node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of credentials\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more credential records. \n\n## Results\n\nEach credential data structure includes the following fields:\n\n* `id`: Database ID for this credential. (integer)\n* `type`: Data type for this credential. (choice)\n* `url`: URL for this credential. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this credential was created. (datetime)\n* `modified`: Timestamp when this credential was last modified. (datetime)\n* `name`: Name of this credential. (string)\n* `description`: Optional description of this credential. (string)\n* `organization`: (id)\n* `credential_type`: Specify the type of credential you want to create. Refer to the documentation for details on each type. (id)\n* `managed`: (boolean)\n* `inputs`: Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax. 
(json)\n* `kind`: (field)\n* `cloud`: (field)\n* `kubernetes`: (field)\n\n\n\n## Sorting\n\nTo specify that credentials are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_credentials_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Credential" + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Credential" + } + }, + "400": { + "examples": { + "application/json": { + "msg": "Related template cannot accept credentials on launch." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_template_nodes/{id}/failure_nodes/": { + "get": { + "description": "workflow job template nodes associated with the selected\nworkflow job template node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job template nodes\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template node records. \n\n## Results\n\nEach workflow job template node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. 
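For the credentials endpoint above, a sketch of attaching an existing credential to the node as a launch-time credential. Associating by `id` is an assumption based on the other related-resource endpoints here; the 400 example shows the error returned when the underlying template does not accept credentials on launch:

    # Associate existing credential 8 with workflow job template node 1.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"id": 8}' \
      "$HOST/api/v2/workflow_job_template_nodes/1/credentials/"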
(datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job template nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_failure_nodes_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobTemplateNode" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "workflow job template nodes associated with the selected\nworkflow job template node.\n\nThe resulting data 
structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job template nodes\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template node records. \n\n## Results\n\nEach workflow job template node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job template nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_failure_nodes_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "associate": true, + "id": 2 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNode" + } + }, + "400": { + "examples": { + "application/json": { + "Error": "Relationship not allowed." + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_template_nodes/{id}/instance_groups/": { + "get": { + "description": "instance groups associated with the selected\nworkflow job template node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. (integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_instance_groups_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/InstanceGroup" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "instance groups associated with the selected\nworkflow job template node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of instance groups\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more instance group records. \n\n## Results\n\nEach instance group data structure includes the following fields:\n\n* `id`: Database ID for this instance group. (integer)\n* `type`: Data type for this instance group. (choice)\n* `url`: URL for this instance group. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `name`: Name of this instance group. (string)\n* `created`: Timestamp when this instance group was created. (datetime)\n* `modified`: Timestamp when this instance group was last modified. (datetime)\n* `capacity`: (field)\n* `consumed_capacity`: (field)\n* `percent_capacity_remaining`: (field)\n* `jobs_running`: (field)\n* `max_concurrent_jobs`: Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced. 
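A similar sketch for the `instance_groups` relation documented here (placeholder IDs; association by `id` is again an assumption rather than something this schema states):

    # Pin workflow job template node 1 to instance group 4.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"id": 4}' \
      "$HOST/api/v2/workflow_job_template_nodes/1/instance_groups/"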
(integer)\n* `max_forks`: Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced. (integer)\n* `jobs_total`: Count of all jobs that target this instance group (integer)\n* `instances`: (field)\n* `is_container_group`: Indicates whether instances in this group are containerized. Containerized groups have a designated OpenShift or Kubernetes cluster. (boolean)\n* `credential`: (id)\n* `policy_instance_percentage`: Minimum percentage of all instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_minimum`: Static minimum number of Instances that will be automatically assigned to this group when new instances come online. (integer)\n* `policy_instance_list`: List of exact-match Instances that will be assigned to this group (json)\n* `pod_spec_override`: (string)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n\n\n\n## Sorting\n\nTo specify that instance groups are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_instance_groups_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/InstanceGroup" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_template_nodes/{id}/labels/": { + "get": { + "description": "labels associated with the selected\nworkflow job template node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. 
(datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_labels_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Label" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "labels associated with the selected\nworkflow job template node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. 
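Given the label fields above, `name` and `organization` are the values the POST accepts; sending them to the node's labels sub-resource appears to create the label and attach it in one request (a sketch under that assumption, using the same placeholder $CONTROLLER and $TOKEN and an illustrative node ID):

    # Create the label "nightly" in organization 1 and attach it to node 42.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name": "nightly", "organization": 1}' \
      "https://$CONTROLLER/api/v2/workflow_job_template_nodes/42/labels/"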
(string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_labels_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Label" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Label" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_template_nodes/{id}/success_nodes/": { + "get": { + "description": "workflow job template nodes associated with the selected\nworkflow job template node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job template nodes\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template node records. \n\n## Results\n\nEach workflow job template node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. 
(id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job template nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_success_nodes_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobTemplateNode" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "workflow job template nodes associated with the selected\nworkflow job template node.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job template nodes\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template node records. \n\n## Results\n\nEach workflow job template node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template node. 
(integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job template nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_template_nodes_success_nodes_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "associate": true, + "id": 2 + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNode" + } + }, + "400": { + "examples": { + "application/json": { + "Error": "Relationship not allowed." 
+ } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/": { + "get": { + "description": "workflow job templates.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template records. \n\n## Results\n\nEach workflow job template data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template. (integer)\n* `type`: Data type for this workflow job template. (choice)\n* `url`: URL for this workflow job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template was created. (datetime)\n* `modified`: Timestamp when this workflow job template was last modified. (datetime)\n* `name`: Name of this workflow job template. (string)\n* `description`: Optional description of this workflow job template. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `extra_vars`: (json)\n* `organization`: The organization used to determine access to this template. (id)\n* `survey_enabled`: (boolean)\n* `allow_simultaneous`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `ask_labels_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n## Sorting\n\nTo specify that workflow job templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
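The success_nodes POST shown earlier associates an existing node by ID; its example body, `{"associate": true, "id": 2}`, links node 2 as a success path. A sketch with the same placeholder host and token and illustrative node IDs:

    # Run node 2 when node 42 succeeds.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"associate": true, "id": 2}' \
      "https://$CONTROLLER/api/v2/workflow_job_template_nodes/42/success_nodes/"

A 400 response with "Relationship not allowed." means the association is not permitted for that pair of nodes.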
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ], + "post": { + "description": "workflow job templates.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template records. \n\n## Results\n\nEach workflow job template data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template. (integer)\n* `type`: Data type for this workflow job template. (choice)\n* `url`: URL for this workflow job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template was created. (datetime)\n* `modified`: Timestamp when this workflow job template was last modified. (datetime)\n* `name`: Name of this workflow job template. (string)\n* `description`: Optional description of this workflow job template. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `extra_vars`: (json)\n* `organization`: The organization used to determine access to this template. 
(id)\n* `survey_enabled`: (boolean)\n* `allow_simultaneous`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `ask_labels_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n## Sorting\n\nTo specify that workflow job templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowJobTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow job template. (integer)\n* `type`: Data type for this workflow job template. (choice)\n* `url`: URL for this workflow job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template was created. (datetime)\n* `modified`: Timestamp when this workflow job template was last modified. (datetime)\n* `name`: Name of this workflow job template. (string)\n* `description`: Optional description of this workflow job template. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `extra_vars`: (json)\n* `organization`: The organization used to determine access to this template. 
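Per the create endpoint above, only `name` is required when POSTing a new workflow job template; the other fields fall back to the defaults listed in the update section below. A sketch with the same placeholders and an illustrative organization ID:

    # Create a minimal workflow job template in organization 1.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name": "deploy-pipeline", "organization": 1}' \
      "https://$CONTROLLER/api/v2/workflow_job_templates/"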
(id)\n* `survey_enabled`: (boolean)\n* `allow_simultaneous`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `ask_labels_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n\n\n# Update a Workflow Job Template:\n\nMake a PUT or PATCH request to this resource to update this\nworkflow job template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this workflow job template. (string, required)\n* `description`: Optional description of this workflow job template. (string, default=`\"\"`)\n\n\n\n\n* `extra_vars`: (json, default=``)\n* `organization`: The organization used to determine access to this template. (id, default=``)\n* `survey_enabled`: (boolean, default=`False`)\n* `allow_simultaneous`: (boolean, default=`False`)\n* `ask_variables_on_launch`: (boolean, default=`False`)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id, default=``)\n* `limit`: (string, default=`\"\"`)\n* `scm_branch`: (string, default=`\"\"`)\n* `ask_inventory_on_launch`: (boolean, default=`False`)\n* `ask_scm_branch_on_launch`: (boolean, default=`False`)\n* `ask_limit_on_launch`: (boolean, default=`False`)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id, default=``)\n* `ask_labels_on_launch`: (boolean, default=`False`)\n* `ask_skip_tags_on_launch`: (boolean, default=`False`)\n* `ask_tags_on_launch`: (boolean, default=`False`)\n* `skip_tags`: (string, default=`\"\"`)\n* `job_tags`: (string, default=`\"\"`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Workflow Job Template:\n\nMake a DELETE request to this resource to delete this workflow job template.", + "operationId": "api_workflow_job_templates_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job template", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow job template. (integer)\n* `type`: Data type for this workflow job template. (choice)\n* `url`: URL for this workflow job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template was created. (datetime)\n* `modified`: Timestamp when this workflow job template was last modified. (datetime)\n* `name`: Name of this workflow job template. 
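The DELETE operation defined above takes no request body and returns 204 on success. A sketch:

    # Remove workflow job template 1; a printed 204 confirms deletion.
    curl -s -o /dev/null -w "%{http_code}\n" -X DELETE \
      -H "Authorization: Bearer $TOKEN" \
      "https://$CONTROLLER/api/v2/workflow_job_templates/1/"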
(string)\n* `description`: Optional description of this workflow job template. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `extra_vars`: (json)\n* `organization`: The organization used to determine access to this template. (id)\n* `survey_enabled`: (boolean)\n* `allow_simultaneous`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `ask_labels_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n\n\n# Update a Workflow Job Template:\n\nMake a PUT or PATCH request to this resource to update this\nworkflow job template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this workflow job template. (string, required)\n* `description`: Optional description of this workflow job template. (string, default=`\"\"`)\n\n\n\n\n* `extra_vars`: (json, default=``)\n* `organization`: The organization used to determine access to this template. 
(id, default=``)\n* `survey_enabled`: (boolean, default=`False`)\n* `allow_simultaneous`: (boolean, default=`False`)\n* `ask_variables_on_launch`: (boolean, default=`False`)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id, default=``)\n* `limit`: (string, default=`\"\"`)\n* `scm_branch`: (string, default=`\"\"`)\n* `ask_inventory_on_launch`: (boolean, default=`False`)\n* `ask_scm_branch_on_launch`: (boolean, default=`False`)\n* `ask_limit_on_launch`: (boolean, default=`False`)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id, default=``)\n* `ask_labels_on_launch`: (boolean, default=`False`)\n* `ask_skip_tags_on_launch`: (boolean, default=`False`)\n* `ask_tags_on_launch`: (boolean, default=`False`)\n* `skip_tags`: (string, default=`\"\"`)\n* `job_tags`: (string, default=`\"\"`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Workflow Job Template:\n\nMake a DELETE request to this resource to delete this workflow job template.", + "operationId": "api_workflow_job_templates_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobTemplate" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job template", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "patch": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow job template. (integer)\n* `type`: Data type for this workflow job template. (choice)\n* `url`: URL for this workflow job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template was created. (datetime)\n* `modified`: Timestamp when this workflow job template was last modified. (datetime)\n* `name`: Name of this workflow job template. (string)\n* `description`: Optional description of this workflow job template. (string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `extra_vars`: (json)\n* `organization`: The organization used to determine access to this template. 
(id)\n* `survey_enabled`: (boolean)\n* `allow_simultaneous`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `ask_labels_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n\n\n# Update a Workflow Job Template:\n\nMake a PUT or PATCH request to this resource to update this\nworkflow job template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this workflow job template. (string, required)\n* `description`: Optional description of this workflow job template. (string, default=`\"\"`)\n\n\n\n\n* `extra_vars`: (json, default=``)\n* `organization`: The organization used to determine access to this template. (id, default=``)\n* `survey_enabled`: (boolean, default=`False`)\n* `allow_simultaneous`: (boolean, default=`False`)\n* `ask_variables_on_launch`: (boolean, default=`False`)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id, default=``)\n* `limit`: (string, default=`\"\"`)\n* `scm_branch`: (string, default=`\"\"`)\n* `ask_inventory_on_launch`: (boolean, default=`False`)\n* `ask_scm_branch_on_launch`: (boolean, default=`False`)\n* `ask_limit_on_launch`: (boolean, default=`False`)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id, default=``)\n* `ask_labels_on_launch`: (boolean, default=`False`)\n* `ask_skip_tags_on_launch`: (boolean, default=`False`)\n* `ask_tags_on_launch`: (boolean, default=`False`)\n* `skip_tags`: (string, default=`\"\"`)\n* `job_tags`: (string, default=`\"\"`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Workflow Job Template:\n\nMake a DELETE request to this resource to delete this workflow job template.", + "operationId": "api_workflow_job_templates_partial_update", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "name": "foooooo" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "allow_simultaneous": false, + "ask_inventory_on_launch": false, + "ask_labels_on_launch": false, + "ask_limit_on_launch": false, + "ask_scm_branch_on_launch": false, + "ask_skip_tags_on_launch": false, + "ask_tags_on_launch": false, + "ask_variables_on_launch": false, + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "extra_vars": "", + "id": 1, + "inventory": null, + "job_tags": null, + "last_job_failed": false, + "last_job_run": null, + "limit": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "foooooo", + "next_job_run": null, + "organization": 1, + "related": { + "access_list": "/api/v2/workflow_job_templates/1/access_list/", + 
"activity_stream": "/api/v2/workflow_job_templates/1/activity_stream/", + "copy": "/api/v2/workflow_job_templates/1/copy/", + "labels": "/api/v2/workflow_job_templates/1/labels/", + "launch": "/api/v2/workflow_job_templates/1/launch/", + "notification_templates_approvals": "/api/v2/workflow_job_templates/1/notification_templates_approvals/", + "notification_templates_error": "/api/v2/workflow_job_templates/1/notification_templates_error/", + "notification_templates_started": "/api/v2/workflow_job_templates/1/notification_templates_started/", + "notification_templates_success": "/api/v2/workflow_job_templates/1/notification_templates_success/", + "object_roles": "/api/v2/workflow_job_templates/1/object_roles/", + "organization": "/api/v2/organizations/1/", + "schedules": "/api/v2/workflow_job_templates/1/schedules/", + "survey_spec": "/api/v2/workflow_job_templates/1/survey_spec/", + "webhook_key": "/api/v2/workflow_job_templates/1/webhook_key/", + "webhook_receiver": "", + "workflow_jobs": "/api/v2/workflow_job_templates/1/workflow_jobs/", + "workflow_nodes": "/api/v2/workflow_job_templates/1/workflow_nodes/" + }, + "scm_branch": null, + "skip_tags": null, + "status": "never updated", + "summary_fields": { + "labels": { + "count": 0, + "results": [] + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the workflow job template", + "id": 16, + "name": "Admin" + }, + "approval_role": { + "description": "Can approve or deny a workflow approval node", + "id": 19, + "name": "Approve" + }, + "execute_role": { + "description": "May run the workflow job template", + "id": 17, + "name": "Execute" + }, + "read_role": { + "description": "May view settings for the workflow job template", + "id": 18, + "name": "Read" + } + }, + "organization": { + "description": "test-org-desc", + "id": 1, + "name": "test-org" + }, + "recent_jobs": [], + "user_capabilities": { + "copy": true, + "delete": true, + "edit": true, + "schedule": true, + "start": true + } + }, + "survey_enabled": false, + "type": "workflow_job_template", + "url": "/api/v2/workflow_job_templates/1/", + "webhook_credential": null, + "webhook_service": "" + } + }, + "schema": { + "$ref": "#/definitions/WorkflowJobTemplate" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job template", + "tags": [ + "api" + ] + }, + "put": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow job template. (integer)\n* `type`: Data type for this workflow job template. (choice)\n* `url`: URL for this workflow job template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template was created. (datetime)\n* `modified`: Timestamp when this workflow job template was last modified. (datetime)\n* `name`: Name of this workflow job template. (string)\n* `description`: Optional description of this workflow job template. 
(string)\n* `last_job_run`: (datetime)\n* `last_job_failed`: (boolean)\n* `next_job_run`: (datetime)\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n - `never updated`: Never Updated\n - `ok`: OK\n - `missing`: Missing\n - `none`: No External Source\n - `updating`: Updating\n* `extra_vars`: (json)\n* `organization`: The organization used to determine access to this template. (id)\n* `survey_enabled`: (boolean)\n* `allow_simultaneous`: (boolean)\n* `ask_variables_on_launch`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `ask_inventory_on_launch`: (boolean)\n* `ask_scm_branch_on_launch`: (boolean)\n* `ask_limit_on_launch`: (boolean)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `ask_labels_on_launch`: (boolean)\n* `ask_skip_tags_on_launch`: (boolean)\n* `ask_tags_on_launch`: (boolean)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n\n\n# Update a Workflow Job Template:\n\nMake a PUT or PATCH request to this resource to update this\nworkflow job template. The following fields may be modified:\n\n\n\n\n\n\n\n\n\n* `name`: Name of this workflow job template. (string, required)\n* `description`: Optional description of this workflow job template. (string, default=`\"\"`)\n\n\n\n\n* `extra_vars`: (json, default=``)\n* `organization`: The organization used to determine access to this template. 
(id, default=``)\n* `survey_enabled`: (boolean, default=`False`)\n* `allow_simultaneous`: (boolean, default=`False`)\n* `ask_variables_on_launch`: (boolean, default=`False`)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id, default=``)\n* `limit`: (string, default=`\"\"`)\n* `scm_branch`: (string, default=`\"\"`)\n* `ask_inventory_on_launch`: (boolean, default=`False`)\n* `ask_scm_branch_on_launch`: (boolean, default=`False`)\n* `ask_limit_on_launch`: (boolean, default=`False`)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id, default=``)\n* `ask_labels_on_launch`: (boolean, default=`False`)\n* `ask_skip_tags_on_launch`: (boolean, default=`False`)\n* `ask_tags_on_launch`: (boolean, default=`False`)\n* `skip_tags`: (string, default=`\"\"`)\n* `job_tags`: (string, default=`\"\"`)\n\n\n\n\n\n\nFor a PUT request, include **all** fields in the request.\n\n\n\nFor a PATCH request, include only the fields that are being modified.\n\n\n\n# Delete a Workflow Job Template:\n\nMake a DELETE request to this resource to delete this workflow job template.", + "operationId": "api_workflow_job_templates_update", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowJobTemplate" + } + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobTemplate" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job template", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/access_list/": { + "get": { + "description": "users.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of users\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more user records. \n\n## Results\n\nEach user data structure includes the following fields:\n\n* `id`: Database ID for this user. (integer)\n* `type`: Data type for this user. (choice)\n* `url`: URL for this user. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this user was created. (datetime)\n* `modified`: Timestamp when this user was last modified. (datetime)\n* `username`: Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. (string)\n* `first_name`: (string)\n* `last_name`: (string)\n* `email`: (string)\n* `is_superuser`: Designates that this user has all permissions without explicitly assigning them. (boolean)\n* `is_system_auditor`: (boolean)\n* `password`: Field used to change the password. 
(string)\n* `ldap_dn`: (string)\n* `last_login`: (datetime)\n* `external_account`: Set if the account is managed by an external service (field)\n\n\n\n## Sorting\n\nTo specify that users are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=username\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-username\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=username,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_access_list_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ResourceAccessListElement" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_job_templates/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). 
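The access_list endpoint above is read-only and accepts the standard list parameters. A sketch with the same placeholders and an illustrative template ID and username:

    # Find users matching "alice" with access to workflow job template 7.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://$CONTROLLER/api/v2/workflow_job_templates/7/access_list/?order_by=username&search=alice"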
(choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_job_templates/{id}/copy/": { + "get": { + "description": "permission to copy the workflow job template and whether any linked\ntemplates or prompted fields will be ignored due to permissions problems.\nThe response will include the following fields:\n\n* `can_copy`: Flag indicating whether the 
active user has permission to make\n a copy of this workflow job template, provides same content as the\n workflow job template detail view summary_fields.user_capabilities.copy\n (boolean, read-only)\n* `can_copy_without_user_input`: Flag indicating if the user should be\n prompted for confirmation before the copy is executed (boolean, read-only)\n* `templates_unable_to_copy`: List of node ids of nodes that have a related\n job template, project, or inventory that the current user lacks permission\n to use and will be missing in workflow nodes of the copy (array, read-only)\n* `inventories_unable_to_copy`: List of node ids of nodes that have a related\n prompted inventory that the current user lacks permission\n to use and will be missing in workflow nodes of the copy (array, read-only)\n* `credentials_unable_to_copy`: List of node ids of nodes that have a related\n prompted credential that the current user lacks permission\n to use and will be missing in workflow nodes of the copy (array, read-only)\n\nMake a POST request to this endpoint to save a copy of this\nworkflow job template. No POST data is accepted for this action.\n\nIf successful, the response status code will be 201. The response body will\ncontain serialized data about the new workflow job template, which will be\nsimilar to the original workflow job template, but with an additional `@`\nand a timestamp in the name.\n\nAll workflow nodes and connections in the original will also exist in the\ncopy. The nodes will be missing related resources if the user did not have\naccess to use them.", + "operationId": "api_workflow_job_templates_copy_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Copy" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to determine if the current user has", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "permission to copy the workflow job template and whether any linked\ntemplates or prompted fields will be ignored due to permissions problems.\nThe response will include the following fields:\n\n* `can_copy`: Flag indicating whether the active user has permission to make\n a copy of this workflow job template, provides same content as the\n workflow job template detail view summary_fields.user_capabilities.copy\n (boolean, read-only)\n* `can_copy_without_user_input`: Flag indicating if the user should be\n prompted for confirmation before the copy is executed (boolean, read-only)\n* `templates_unable_to_copy`: List of node ids of nodes that have a related\n job template, project, or 
inventory that the current user lacks permission\n to use and will be missing in workflow nodes of the copy (array, read-only)\n* `inventories_unable_to_copy`: List of node ids of nodes that have a related\n prompted inventory that the current user lacks permission\n to use and will be missing in workflow nodes of the copy (array, read-only)\n* `credentials_unable_to_copy`: List of node ids of nodes that have a related\n prompted credential that the current user lacks permission\n to use and will be missing in workflow nodes of the copy (array, read-only)\n\nMake a POST request to this endpoint to save a copy of this\nworkflow job template. No POST data is accepted for this action.\n\nIf successful, the response status code will be 201. The response body will\ncontain serialized data about the new workflow job template, which will be\nsimilar to the original workflow job template, but with an additional `@`\nand a timestamp in the name.\n\nAll workflow nodes and connections in the original will also exist in the\ncopy. The nodes will be missing related resources if the user did not have\naccess to use them.", + "operationId": "api_workflow_job_templates_copy_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Copy" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Copy" + } + } + }, + "summary": "Make a GET request to this resource to determine if the current user has", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/github/": { + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_workflow_job_templates_github_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/workflow_job_templates/{id}/github/", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/gitlab/": { + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_workflow_job_templates_gitlab_create", + "parameters": [], + "responses": { + "201": { + "description": "" + } + }, + "summary": "No Description for post on /api/{version}/workflow_job_templates/{id}/gitlab/", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/labels/": { + "get": { + "description": "labels associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. 
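Per the copy endpoint described above, a GET previews whether the copy is permitted and a POST performs it with no request data. A sketch with the same placeholders:

    # Check copy permissions, then save a copy of workflow job template 7.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://$CONTROLLER/api/v2/workflow_job_templates/7/copy/"
    curl -s -X POST -H "Authorization: Bearer $TOKEN" \
      "https://$CONTROLLER/api/v2/workflow_job_templates/7/copy/"

On success the POST returns 201 and the new template's name carries an additional `@` plus a timestamp.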
The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_labels_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Label" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "labels associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. 
(datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. (id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_labels_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Label" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Label" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/launch/": { + "get": { + "description": "can be launched and whether any passwords are required to launch the\nworkflow_job_template. The response will include the following fields:\n\n* `can_start_without_user_input`: Flag indicating if the workflow_job_template\n can be launched without user-input (boolean, read-only)\n* `variables_needed_to_start`: Required variable names required to launch the\n workflow_job_template (array, read-only)\n* `survey_enabled`: Flag indicating whether the workflow_job_template has an\n enabled survey (boolean, read-only)\n* `extra_vars`: Text which is the `extra_vars` field of this workflow_job_template\n (text, read-only)\n* `node_templates_missing`: List of node ids of all nodes that have a\n null `unified_job_template`, which will cause their branches to stop\n execution (list, read-only)\n* `node_prompts_rejected`: List of node ids of all nodes that have\n specified a field that will be rejected because its `unified_job_template`\n does not allow prompting for this field, this will not halt execution of\n the branch but the field will be ignored (list, read-only)\n* `workflow_job_template_data`: JSON object listing general information of\n this workflow_job_template (JSON object, read-only)\n\nMake a POST request to this resource to launch the workflow_job_template. If any\ncredential, inventory, project or extra variables (extra_vars) are required, they\nmust be passed via POST data, with extra_vars given as a YAML or JSON string and\nescaped parentheses.\n\nIf successful, the response status code will be 201. If any required passwords\nare not provided, a 400 status code will be returned. If the workflow job cannot\nbe launched, a 405 status code will be returned. 
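The `order_by`, `page_size`, `page`, and `search` parameters documented for the labels list above can be combined on a single request. A minimal sketch (host, token, and template id are placeholders):

    # Page 2 of labels for workflow job template 42, 100 per page,
    # sorted by name in reverse, restricted to labels matching "prod".
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://controller.example.com/api/v2/workflow_job_templates/42/labels/?order_by=-name&page_size=100&page=2&search=prod"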
If the provided credential or\ninventory are not allowed to be used by the user, then a 403 status code will\nbe returned.", + "operationId": "api_workflow_job_templates_launch_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobLaunch" + } + } + }, + "summary": "Make a GET request to this resource to determine if the workflow_job_template", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "can be launched and whether any passwords are required to launch the\nworkflow_job_template. The response will include the following fields:\n\n* `can_start_without_user_input`: Flag indicating if the workflow_job_template\n can be launched without user-input (boolean, read-only)\n* `variables_needed_to_start`: Required variable names required to launch the\n workflow_job_template (array, read-only)\n* `survey_enabled`: Flag indicating whether the workflow_job_template has an\n enabled survey (boolean, read-only)\n* `extra_vars`: Text which is the `extra_vars` field of this workflow_job_template\n (text, read-only)\n* `node_templates_missing`: List of node ids of all nodes that have a\n null `unified_job_template`, which will cause their branches to stop\n execution (list, read-only)\n* `node_prompts_rejected`: List of node ids of all nodes that have\n specified a field that will be rejected because its `unified_job_template`\n does not allow prompting for this field, this will not halt execution of\n the branch but the field will be ignored (list, read-only)\n* `workflow_job_template_data`: JSON object listing general information of\n this workflow_job_template (JSON object, read-only)\n\nMake a POST request to this resource to launch the workflow_job_template. If any\ncredential, inventory, project or extra variables (extra_vars) are required, they\nmust be passed via POST data, with extra_vars given as a YAML or JSON string and\nescaped parentheses.\n\nIf successful, the response status code will be 201. If any required passwords\nare not provided, a 400 status code will be returned. If the workflow job cannot\nbe launched, a 405 status code will be returned. If the provided credential or\ninventory are not allowed to be used by the user, then a 403 status code will\nbe returned.", + "operationId": "api_workflow_job_templates_launch_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowJobLaunch" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobLaunch" + } + } + }, + "summary": "Make a GET request to this resource to determine if the workflow_job_template", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/notification_templates_approvals/": { + "get": { + "description": "notification templates associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. 
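The launch endpoint above supports an inspect-then-launch flow. A minimal curl sketch; host, token, and id are placeholders, and `extra_vars` is passed inline as a JSON string, as the description permits:

    # Inspect launch requirements; variables_needed_to_start lists any
    # survey variables that must be supplied on launch.
    curl -s -H "Authorization: Bearer $TOKEN" \
      https://controller.example.com/api/v2/workflow_job_templates/42/launch/
    # Launch the workflow. Expect 201 on success, 400 if required
    # variables are missing, 405 if the workflow cannot be launched.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"extra_vars": "{\"env\": \"staging\"}"}' \
      https://controller.example.com/api/v2/workflow_job_templates/42/launch/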
\n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_notification_templates_approvals_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\nworkflow job 
template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_notification_templates_approvals_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/notification_templates_error/": { + "get": { + "description": "notification templates associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. 
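Although the POST schema above references the full NotificationTemplate object, a POST to a related list like this conventionally associates an existing template by id; the same pattern applies to the `_error`, `_started`, and `_success` variants that follow. A sketch under that assumption (ids and host are placeholders):

    # Attach existing notification template 5 to the approval events of
    # workflow job template 42 (assumed association-by-id pattern).
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"id": 5}' \
      https://controller.example.com/api/v2/workflow_job_templates/42/notification_templates_approvals/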
The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_notification_templates_error_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. 
(json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_notification_templates_error_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/notification_templates_started/": { + "get": { + "description": "notification templates associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. 
(json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_notification_templates_started_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. 
(string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_notification_templates_started_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/notification_templates_success/": { + "get": { + "description": "notification templates associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. 
(string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_notification_templates_success_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/NotificationTemplate" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "notification templates associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notification templates\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification template records. \n\n## Results\n\nEach notification template data structure includes the following fields:\n\n* `id`: Database ID for this notification template. (integer)\n* `type`: Data type for this notification template. (choice)\n* `url`: URL for this notification template. (string)\n* `related`: Data structure with URLs of related resources. 
(object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification template was created. (datetime)\n* `modified`: Timestamp when this notification template was last modified. (datetime)\n* `name`: Name of this notification template. (string)\n* `description`: Optional description of this notification template. (string)\n* `organization`: (id)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `notification_configuration`: (json)\n* `messages`: Optional custom messages for notification template. (json)\n\n\n\n## Sorting\n\nTo specify that notification templates are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_notification_templates_success_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/NotificationTemplate" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/object_roles/": { + "get": { + "description": "roles associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of roles\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more role records. \n\n## Results\n\nEach role data structure includes the following fields:\n\n* `id`: Database ID for this role. (integer)\n* `type`: Data type for this role. (choice)\n* `url`: URL for this role. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `name`: Name of this role. (field)\n* `description`: Optional description of this role. 
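The reverse operation, removing an association, is conventionally a POST to the same list with a `disassociate` flag. This is an assumption about the related-list pattern, not something stated in the schema above:

    # Detach notification template 5 from the success events of
    # workflow job template 42 (assumed disassociation pattern).
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"id": 5, "disassociate": true}' \
      https://controller.example.com/api/v2/workflow_job_templates/42/notification_templates_success/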
(field)\n\n\n\n## Sorting\n\nTo specify that roles are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_object_roles_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Role" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_job_templates/{id}/schedules/": { + "get": { + "description": "schedules associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of schedules\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more schedule records. \n\n## Results\n\nEach schedule data structure includes the following fields:\n\n* `rrule`: A value representing the schedules iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. 
(string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time, afterwards the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n## Sorting\n\nTo specify that schedules are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_schedules_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Schedule" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "schedules associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of schedules\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more schedule records. \n\n## Results\n\nEach schedule data structure includes the following fields:\n\n* `rrule`: A value representing the schedules iCal recurrence rule. (string)\n* `id`: Database ID for this schedule. (integer)\n* `type`: Data type for this schedule. (choice)\n* `url`: URL for this schedule. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this schedule was created. (datetime)\n* `modified`: Timestamp when this schedule was last modified. (datetime)\n* `name`: Name of this schedule. (string)\n* `description`: Optional description of this schedule. 
(string)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `unified_job_template`: (id)\n* `enabled`: Enables processing of this schedule. (boolean)\n* `dtstart`: The first occurrence of the schedule occurs on or after this time. (datetime)\n* `dtend`: The last occurrence of the schedule occurs before this time, afterwards the schedule expires. (datetime)\n* `next_run`: The next time that the scheduled action will run. (datetime)\n* `timezone`: The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field. (field)\n* `until`: The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned. (field)\n\n\n\n## Sorting\n\nTo specify that schedules are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_schedules_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "inventory": 1, + "name": "test sch", + "rrule": "DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1" + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "diff_mode": null, + "dtend": "2015-11-17T05:00:00Z", + "dtstart": "2015-11-17T05:00:00Z", + "enabled": true, + "execution_environment": null, + "extra_data": {}, + "forks": null, + "id": 1, + "inventory": 1, + "job_slice_count": null, + "job_tags": null, + "job_type": null, + "limit": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "test sch", + "next_run": null, + "related": { + "credentials": "/api/v2/schedules/1/credentials/", + "instance_groups": "/api/v2/schedules/1/instance_groups/", + "inventory": "/api/v2/inventories/1/", + "labels": "/api/v2/schedules/1/labels/", + "unified_job_template": "/api/v2/workflow_job_templates/1/", + "unified_jobs": "/api/v2/schedules/1/jobs/" + }, + "rrule": "DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1", + "scm_branch": null, + "skip_tags": null, + "summary_fields": { + "inventory": { + "description": "", + "has_active_failures": false, + "has_inventory_sources": false, + "hosts_with_active_failures": 0, + "id": 1, + "inventory_sources_with_failures": 0, + "kind": "", + "name": "test-inv", + "organization_id": 1, + "total_groups": 0, + "total_hosts": 0, + "total_inventory_sources": 0 + }, + "unified_job_template": { + "description": "", + "id": 1, + "name": "test-workflow_job_template", + "unified_job_type": "workflow_job" + }, + "user_capabilities": { + "delete": true, + "edit": true + } + }, + "timeout": null, + "timezone": "UTC", + "type": "schedule", + "unified_job_template": 1, + "until": "", + "url": "/api/v2/schedules/1/", + "verbosity": null + } + }, + "schema": { + "$ref": "#/definitions/Schedule" + } + }, + "400": { + "examples": { + "application/json": { + "inventory": [ + "Field is not configured to prompt on launch." + ] + } + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/survey_spec/": { + "delete": { + "description": "\n {\n \"name\": \"Simple Survey\",\n \"description\": \"Description of the simple survey\",\n \"spec\": [\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"example question\",\n \t\"question_description\": \"What is your favorite color?\",\n \t\"variable\": \"favorite_color\",\n \t\"required\": false,\n \t\"default\": \"blue\"\n }\n ]\n }\n\n`name` and `description` are required elements at the beginning of the survey specification. 
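The documented request and response examples above translate directly into a curl call. A minimal sketch (host, token, and template id are placeholders); per the 400 example above, prompt-only fields such as `inventory` are rejected unless the template is configured to prompt for them, so it is omitted here:

    # Create the documented example schedule on workflow job template 42:
    # one daily occurrence starting 2015-11-17 05:00 UTC.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name": "test sch", "rrule": "DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1"}' \
      https://controller.example.com/api/v2/workflow_job_templates/42/schedules/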
`spec` must be a\nlist of survey items.\n\nWithin each survey item `type` must be one of:\n\n* text: For survey questions expecting a textual answer\n* password: For survey questions expecting a password or other sensitive information\n* integer: For survey questions expecting a whole number answer\n* float: For survey questions expecting a decimal number\n* multiplechoice: For survey questions where one option from a list is required\n* multiselect: For survey questions where multiple items from a presented list can be selected\n\nEach item must contain a `question_name` and `question_description` field that describes the survey question itself.\nThe `variable` elements of each survey items represents the key that will be given to the playbook when the workflow job template\nis launched. It will contain the value as a result of the survey.\n\nHere is a more comprehensive example showing the various question types and their acceptable parameters:\n\n {\n \"name\": \"Simple\",\n \"description\": \"Description\",\n \"spec\": [\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"cantbeshort\",\n \t\"question_description\": \"What is a long answer\",\n \t\"variable\": \"long_answer\",\n \t\"choices\": \"\",\n \t\"min\": 5,\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"Leeloo Minai Lekarariba-Laminai-Tchai Ekbat De Sebat\"\n },\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"cantbelong\",\n \t\"question_description\": \"What is a short answer\",\n \t\"variable\": \"short_answer\",\n \t\"choices\": \"\",\n \t\"min\": \"\",\n \t\"max\": 7,\n \t\"required\": false,\n \t\"default\": \"leeloo\"\n },\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"reqd\",\n \t\"question_description\": \"I should be required\",\n \t\"variable\": \"reqd_answer\",\n \t\"choices\": \"\",\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": true,\n \t\"default\": \"NOT OPTIONAL\"\n },\n {\n \t\"type\": \"multiplechoice\",\n \t\"question_name\": \"achoice\",\n \t\"question_description\": \"Need one of these\",\n \t\"variable\": \"single_choice\",\n \t\"choices\": [\"one\", \"two\"],\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"one\"\n },\n {\n \t\"type\": \"multiselect\",\n \t\"question_name\": \"mchoice\",\n \t\"question_description\": \"Can have multiples of these\",\n \t\"variable\": \"multi_choice\",\n \t\"choices\": [\"one\", \"two\", \"three\"],\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"one\\nthree\"\n },\n {\n \"type\": \"integer\",\n \"question_name\": \"integerchoice\",\n \"question_description\": \"I need an int here\",\n \"variable\": \"int_answer\",\n \"choices\": \"\",\n \"min\": 1,\n \"max\": 5,\n \"required\": false,\n \"default\": \"\"\n },\n {\n \"type\": \"float\",\n \"question_name\": \"float\",\n \"question_description\": \"I need a float here\",\n \"variable\": \"float_answer\",\n \"choices\": \"\",\n \"min\": 2,\n \"max\": 5,\n \"required\": false,\n \"default\": \"\"\n }\n ]\n }", + "operationId": "api_workflow_job_templates_survey_spec_delete", + "parameters": [], + "responses": { + "204": { + "description": "" + } + }, + "summary": "Here is an example survey specification", + "tags": [ + "api" + ] + }, + "get": { + "description": "\n {\n \"name\": \"Simple Survey\",\n \"description\": \"Description of the simple survey\",\n \"spec\": [\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"example question\",\n \t\"question_description\": \"What is your favorite color?\",\n \t\"variable\": 
\"favorite_color\",\n \t\"required\": false,\n \t\"default\": \"blue\"\n }\n ]\n }\n\n`name` and `description` are required elements at the beginning of the survey specification. `spec` must be a\nlist of survey items.\n\nWithin each survey item `type` must be one of:\n\n* text: For survey questions expecting a textual answer\n* password: For survey questions expecting a password or other sensitive information\n* integer: For survey questions expecting a whole number answer\n* float: For survey questions expecting a decimal number\n* multiplechoice: For survey questions where one option from a list is required\n* multiselect: For survey questions where multiple items from a presented list can be selected\n\nEach item must contain a `question_name` and `question_description` field that describes the survey question itself.\nThe `variable` elements of each survey items represents the key that will be given to the playbook when the workflow job template\nis launched. It will contain the value as a result of the survey.\n\nHere is a more comprehensive example showing the various question types and their acceptable parameters:\n\n {\n \"name\": \"Simple\",\n \"description\": \"Description\",\n \"spec\": [\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"cantbeshort\",\n \t\"question_description\": \"What is a long answer\",\n \t\"variable\": \"long_answer\",\n \t\"choices\": \"\",\n \t\"min\": 5,\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"Leeloo Minai Lekarariba-Laminai-Tchai Ekbat De Sebat\"\n },\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"cantbelong\",\n \t\"question_description\": \"What is a short answer\",\n \t\"variable\": \"short_answer\",\n \t\"choices\": \"\",\n \t\"min\": \"\",\n \t\"max\": 7,\n \t\"required\": false,\n \t\"default\": \"leeloo\"\n },\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"reqd\",\n \t\"question_description\": \"I should be required\",\n \t\"variable\": \"reqd_answer\",\n \t\"choices\": \"\",\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": true,\n \t\"default\": \"NOT OPTIONAL\"\n },\n {\n \t\"type\": \"multiplechoice\",\n \t\"question_name\": \"achoice\",\n \t\"question_description\": \"Need one of these\",\n \t\"variable\": \"single_choice\",\n \t\"choices\": [\"one\", \"two\"],\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"one\"\n },\n {\n \t\"type\": \"multiselect\",\n \t\"question_name\": \"mchoice\",\n \t\"question_description\": \"Can have multiples of these\",\n \t\"variable\": \"multi_choice\",\n \t\"choices\": [\"one\", \"two\", \"three\"],\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"one\\nthree\"\n },\n {\n \"type\": \"integer\",\n \"question_name\": \"integerchoice\",\n \"question_description\": \"I need an int here\",\n \"variable\": \"int_answer\",\n \"choices\": \"\",\n \"min\": 1,\n \"max\": 5,\n \"required\": false,\n \"default\": \"\"\n },\n {\n \"type\": \"float\",\n \"question_name\": \"float\",\n \"question_description\": \"I need a float here\",\n \"variable\": \"float_answer\",\n \"choices\": \"\",\n \"min\": 2,\n \"max\": 5,\n \"required\": false,\n \"default\": \"\"\n }\n ]\n }", + "operationId": "api_workflow_job_templates_survey_spec_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": 
"integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Empty" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Here is an example survey specification", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "\n {\n \"name\": \"Simple Survey\",\n \"description\": \"Description of the simple survey\",\n \"spec\": [\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"example question\",\n \t\"question_description\": \"What is your favorite color?\",\n \t\"variable\": \"favorite_color\",\n \t\"required\": false,\n \t\"default\": \"blue\"\n }\n ]\n }\n\n`name` and `description` are required elements at the beginning of the survey specification. `spec` must be a\nlist of survey items.\n\nWithin each survey item `type` must be one of:\n\n* text: For survey questions expecting a textual answer\n* password: For survey questions expecting a password or other sensitive information\n* integer: For survey questions expecting a whole number answer\n* float: For survey questions expecting a decimal number\n* multiplechoice: For survey questions where one option from a list is required\n* multiselect: For survey questions where multiple items from a presented list can be selected\n\nEach item must contain a `question_name` and `question_description` field that describes the survey question itself.\nThe `variable` elements of each survey items represents the key that will be given to the playbook when the workflow job template\nis launched. 
It will contain the value as a result of the survey.\n\nHere is a more comprehensive example showing the various question types and their acceptable parameters:\n\n {\n \"name\": \"Simple\",\n \"description\": \"Description\",\n \"spec\": [\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"cantbeshort\",\n \t\"question_description\": \"What is a long answer\",\n \t\"variable\": \"long_answer\",\n \t\"choices\": \"\",\n \t\"min\": 5,\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"Leeloo Minai Lekarariba-Laminai-Tchai Ekbat De Sebat\"\n },\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"cantbelong\",\n \t\"question_description\": \"What is a short answer\",\n \t\"variable\": \"short_answer\",\n \t\"choices\": \"\",\n \t\"min\": \"\",\n \t\"max\": 7,\n \t\"required\": false,\n \t\"default\": \"leeloo\"\n },\n {\n \t\"type\": \"text\",\n \t\"question_name\": \"reqd\",\n \t\"question_description\": \"I should be required\",\n \t\"variable\": \"reqd_answer\",\n \t\"choices\": \"\",\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": true,\n \t\"default\": \"NOT OPTIONAL\"\n },\n {\n \t\"type\": \"multiplechoice\",\n \t\"question_name\": \"achoice\",\n \t\"question_description\": \"Need one of these\",\n \t\"variable\": \"single_choice\",\n \t\"choices\": [\"one\", \"two\"],\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"one\"\n },\n {\n \t\"type\": \"multiselect\",\n \t\"question_name\": \"mchoice\",\n \t\"question_description\": \"Can have multiples of these\",\n \t\"variable\": \"multi_choice\",\n \t\"choices\": [\"one\", \"two\", \"three\"],\n \t\"min\": \"\",\n \t\"max\": \"\",\n \t\"required\": false,\n \t\"default\": \"one\\nthree\"\n },\n {\n \"type\": \"integer\",\n \"question_name\": \"integerchoice\",\n \"question_description\": \"I need an int here\",\n \"variable\": \"int_answer\",\n \"choices\": \"\",\n \"min\": 1,\n \"max\": 5,\n \"required\": false,\n \"default\": \"\"\n },\n {\n \"type\": \"float\",\n \"question_name\": \"float\",\n \"question_description\": \"I need a float here\",\n \"variable\": \"float_answer\",\n \"choices\": \"\",\n \"min\": 2,\n \"max\": 5,\n \"required\": false,\n \"default\": \"\"\n }\n ]\n }", + "operationId": "api_workflow_job_templates_survey_spec_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "description": "A survey that asks about n.", + "name": "n survey", + "spec": [ + { + "choices": "", + "default": 0, + "index": 0, + "max": 100, + "min": -100, + "question_description": "A question about new_question.", + "question_name": "Enter a value for new_question.", + "required": true, + "type": "integer", + "variable": "new_question" + } + ] + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/Empty" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." 
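The request-body example above could be posted as-is. A minimal sketch, with a placeholder host, token, and template ID:

    # Create or replace the survey on workflow job template 42 (hypothetical ID);
    # a 403 is returned if the caller lacks permission, as in the example above.
    curl -s -X POST \
      -H "Authorization: Bearer $TOKEN" \
      -H "Content-Type: application/json" \
      -d '{"name": "n survey",
           "description": "A survey that asks about n.",
           "spec": [{"type": "integer",
                     "question_name": "Enter a value for new_question.",
                     "question_description": "A question about new_question.",
                     "variable": "new_question",
                     "min": -100, "max": 100,
                     "required": true, "default": 0,
                     "choices": "", "index": 0}]}' \
      https://controller.example.com/api/v2/workflow_job_templates/42/survey_spec/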
+ } + } + } + }, + "summary": "Here is an example survey specification", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/webhook_key/": { + "get": { + "description": "", + "operationId": "api_workflow_job_templates_webhook_key_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "", + "examples": { + "application/json": { + "webhook_key": "" + } + }, + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Empty" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "No Description for get on /api/{version}/workflow_job_templates/{id}/webhook_key/", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "", + "operationId": "api_workflow_job_templates_webhook_key_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Empty" + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "webhook_key": "u5kJSbzbzPfMhV6Hd6AbBRKJnqp4RrIk3GeHIuRRNhG2mJwiDI" + } + }, + "schema": { + "$ref": "#/definitions/Empty" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "No Description for post on /api/{version}/workflow_job_templates/{id}/webhook_key/", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_job_templates/{id}/workflow_jobs/": { + "get": { + "description": "workflow jobs associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow jobs\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job records. \n\n## Results\n\nEach workflow job data structure includes the following fields:\n\n* `id`: Database ID for this workflow job. (integer)\n* `type`: Data type for this workflow job. (choice)\n* `url`: URL for this workflow job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this workflow job was created. (datetime)\n* `modified`: Timestamp when this workflow job was last modified. (datetime)\n* `name`: Name of this workflow job. (string)\n* `description`: Optional description of this workflow job. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `workflow_job_template`: (id)\n* `extra_vars`: (json)\n* `allow_simultaneous`: (boolean)\n* `job_template`: If automatically created for a sliced job run, the job template the workflow job was created from. (id)\n* `is_sliced_job`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `webhook_guid`: Unique identifier of the event that triggered this webhook (string)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n## Sorting\n\nTo specify that workflow jobs are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
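To make the sorting and pagination parameters described here concrete, a hypothetical listing call (host, token, and template ID are placeholders, not values from the specification):

    # Workflow jobs for template 42, most recently finished first, 100 per page.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://controller.example.com/api/v2/workflow_job_templates/42/workflow_jobs/?order_by=-finished&page_size=100"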
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_workflow_jobs_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_job_templates/{id}/workflow_nodes/": { + "get": { + "description": "in which to execute them. After a job in this workflow finishes,\nthe subsequent actions are to:\n\n - run nodes contained in \"failure_nodes\" or \"always_nodes\" if job failed\n - run nodes contained in \"success_nodes\" or \"always_nodes\" if job succeeded\n\nThe workflow job is marked as `successful` if all of the jobs running as\na part of the workflow job have completed, and the workflow job has not\nbeen canceled. Even if a job within the workflow has failed, the workflow\njob will not be marked as failed.\n\n\n# List Workflow Job Template Nodes for a Workflow Job Template:\n\nMake a GET request to this resource to retrieve a list of\nworkflow job template nodes associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job template nodes\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template node records. \n\n## Results\n\nEach workflow job template node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `created`: Timestamp when this workflow job template node was created. (datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job template nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
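The node list accepts the same ordering and paging parameters as the other list endpoints. A sketch with placeholder values:

    # Workflow job template nodes for template 42, ordered by database ID.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://controller.example.com/api/v2/workflow_job_templates/42/workflow_nodes/?order_by=id&page_size=50"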
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_workflow_nodes_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobTemplateNode" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Workflow nodes reference templates to execute and define the ordering", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "in which to execute them. After a job in this workflow finishes,\nthe subsequent actions are to:\n\n - run nodes contained in \"failure_nodes\" or \"always_nodes\" if job failed\n - run nodes contained in \"success_nodes\" or \"always_nodes\" if job succeeded\n\nThe workflow job is marked as `successful` if all of the jobs running as\na part of the workflow job have completed, and the workflow job has not\nbeen canceled. Even if a job within the workflow has failed, the workflow\njob will not be marked as failed.\n\n\n# List Workflow Job Template Nodes for a Workflow Job Template:\n\nMake a GET request to this resource to retrieve a list of\nworkflow job template nodes associated with the selected\nworkflow job template.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job template nodes\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job template node records. \n\n## Results\n\nEach workflow job template node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job template node. (integer)\n* `type`: Data type for this workflow job template node. (choice)\n* `url`: URL for this workflow job template node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job template node was created. 
(datetime)\n* `modified`: Timestamp when this workflow job template node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `workflow_job_template`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `identifier`: An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job template nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_job_templates_workflow_nodes_create", + "parameters": [ + { + "in": "body", + "name": "data", + "schema": { + "example": { + "all_parents_must_converge": false + } + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "all_parents_must_converge": false, + "always_nodes": [], + "created": "2018-02-01T08:00:00.000000Z", + "diff_mode": null, + "execution_environment": null, + "extra_data": {}, + "failure_nodes": [], + "forks": null, + "id": 1, + "identifier": "00000000-0000-0000-0000-000000000000", + "inventory": null, + "job_slice_count": null, + "job_tags": null, + "job_type": null, + "limit": null, + "modified": "2018-02-01T08:00:00.000000Z", + "related": { + "always_nodes": "/api/v2/workflow_job_template_nodes/1/always_nodes/", + "create_approval_template": "/api/v2/workflow_job_template_nodes/1/create_approval_template/", + "credentials": "/api/v2/workflow_job_template_nodes/1/credentials/", + "failure_nodes": "/api/v2/workflow_job_template_nodes/1/failure_nodes/", + "instance_groups": "/api/v2/workflow_job_template_nodes/1/instance_groups/", + "labels": "/api/v2/workflow_job_template_nodes/1/labels/", + "success_nodes": 
"/api/v2/workflow_job_template_nodes/1/success_nodes/", + "workflow_job_template": "/api/v2/workflow_job_templates/1/" + }, + "scm_branch": null, + "skip_tags": null, + "success_nodes": [], + "summary_fields": { + "workflow_job_template": { + "description": "", + "id": 1, + "name": "test-workflow_job_template" + } + }, + "timeout": null, + "type": "workflow_job_template_node", + "unified_job_template": null, + "url": "/api/v2/workflow_job_template_nodes/1/", + "verbosity": null, + "workflow_job_template": 1 + } + }, + "schema": { + "$ref": "#/definitions/WorkflowJobTemplateNode" + } + }, + "400": { + "examples": { + "application/json": { + "limit": [ + "Field is not configured to prompt on launch." + ] + } + } + } + }, + "summary": "Workflow nodes reference templates to execute and define the ordering", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_jobs/": { + "get": { + "description": "workflow jobs.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow jobs\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job records. \n\n## Results\n\nEach workflow job data structure includes the following fields:\n\n* `id`: Database ID for this workflow job. (integer)\n* `type`: Data type for this workflow job. (choice)\n* `url`: URL for this workflow job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job was created. (datetime)\n* `modified`: Timestamp when this workflow job was last modified. (datetime)\n* `name`: Name of this workflow job. (string)\n* `description`: Optional description of this workflow job. (string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `workflow_job_template`: (id)\n* `extra_vars`: (json)\n* `allow_simultaneous`: (boolean)\n* `job_template`: If automatically created for a sliced job run, the job template the workflow job was created from. 
(id)\n* `is_sliced_job`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `webhook_guid`: Unique identifier of the event that triggered this webhook (string)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n## Sorting\n\nTo specify that workflow jobs are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_jobs_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve the list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_jobs/{id}/": { + "delete": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow job. (integer)\n* `type`: Data type for this workflow job. (choice)\n* `url`: URL for this workflow job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job was created. (datetime)\n* `modified`: Timestamp when this workflow job was last modified. (datetime)\n* `name`: Name of this workflow job. (string)\n* `description`: Optional description of this workflow job. 
(string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `result_traceback`: (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `workflow_job_template`: (id)\n* `extra_vars`: (json)\n* `allow_simultaneous`: (boolean)\n* `job_template`: If automatically created for a sliced job run, the job template the workflow job was created from. (id)\n* `is_sliced_job`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `webhook_guid`: Unique identifier of the event that triggered this webhook (string)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n\n\n# Delete a Workflow Job:\n\nMake a DELETE request to this resource to delete this workflow job.", + "operationId": "api_workflow_jobs_delete", + "parameters": [], + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "" + }, + "403": { + "examples": { + "application/json": { + "detail": "Cannot delete running job resource." + } + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job", + "tags": [ + "api" + ] + }, + "get": { + "description": "record containing the following fields:\n\n* `id`: Database ID for this workflow job. (integer)\n* `type`: Data type for this workflow job. (choice)\n* `url`: URL for this workflow job. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job was created. (datetime)\n* `modified`: Timestamp when this workflow job was last modified. (datetime)\n* `name`: Name of this workflow job. (string)\n* `description`: Optional description of this workflow job. 
(string)\n* `unified_job_template`: (id)\n* `launch_type`: (choice)\n - `manual`: Manual\n - `relaunch`: Relaunch\n - `callback`: Callback\n - `scheduled`: Scheduled\n - `dependency`: Dependency\n - `workflow`: Workflow\n - `webhook`: Webhook\n - `sync`: Sync\n - `scm`: SCM Update\n* `status`: (choice)\n - `new`: New\n - `pending`: Pending\n - `waiting`: Waiting\n - `running`: Running\n - `successful`: Successful\n - `failed`: Failed\n - `error`: Error\n - `canceled`: Canceled\n* `failed`: (boolean)\n* `started`: The date and time the job was queued for starting. (datetime)\n* `finished`: The date and time the job finished execution. (datetime)\n* `canceled_on`: The date and time when the cancel request was sent. (datetime)\n* `elapsed`: Elapsed time in seconds that the job ran. (decimal)\n* `job_args`: (string)\n* `job_cwd`: (string)\n* `job_env`: (json)\n* `job_explanation`: A status field to indicate the state of the job if it wasn't able to run and capture stdout (string)\n* `result_traceback`: (string)\n* `launched_by`: (field)\n* `work_unit_id`: The Receptor work unit ID associated with this job. (string)\n* `workflow_job_template`: (id)\n* `extra_vars`: (json)\n* `allow_simultaneous`: (boolean)\n* `job_template`: If automatically created for a sliced job run, the job template the workflow job was created from. (id)\n* `is_sliced_job`: (boolean)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `limit`: (string)\n* `scm_branch`: (string)\n* `webhook_service`: Service that webhook requests will be accepted from (choice)\n - `\"\"`: ---------\n - `github`: GitHub\n - `gitlab`: GitLab\n* `webhook_credential`: Personal Access Token for posting back the status to the service API (id)\n* `webhook_guid`: Unique identifier of the event that triggered this webhook (string)\n* `skip_tags`: (string)\n* `job_tags`: (string)\n\n\n\n\n\n# Delete a Workflow Job:\n\nMake a DELETE request to this resource to delete this workflow job.", + "operationId": "api_workflow_jobs_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJob" + } + } + }, + "summary": "Make GET request to this resource to retrieve a single workflow job", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_jobs/{id}/activity_stream/": { + "get": { + "description": "activity streams associated with the selected\nworkflow job.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of activity streams\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more activity stream records. \n\n## Results\n\nEach activity stream data structure includes the following fields:\n\n* `id`: Database ID for this activity stream. (integer)\n* `type`: Data type for this activity stream. (choice)\n* `url`: URL for this activity stream. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. 
(object)\n* `timestamp`: (datetime)\n* `operation`: The action taken with respect to the given object(s). (choice)\n - `create`: Entity Created\n - `update`: Entity Updated\n - `delete`: Entity Deleted\n - `associate`: Entity Associated with another Entity\n - `disassociate`: Entity was Disassociated with another Entity\n* `changes`: A summary of the new and changed values when an object is created, updated, or deleted (json)\n* `object1`: For create, update, and delete events this is the object type that was affected. For associate and disassociate events this is the object type associated or disassociated with object2. (string)\n* `object2`: Unpopulated for create, update, and delete events. For associate and disassociate events this is the object type that object1 is being associated with. (string)\n* `object_association`: When present, shows the field name of the role or relationship that changed. (field)\n* `action_node`: The cluster node the activity took place on. (string)\n* `object_type`: When present, shows the model on which the role or relationship was defined. (field)\n\n\n\n## Sorting\n\nTo specify that activity streams are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_jobs_activity_stream_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/ActivityStream" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_jobs/{id}/cancel/": { + "get": { + "description": "canceled. 
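A quick sketch of pulling the activity stream for one workflow job, combined with the `search` parameter described above; the host, token, and job ID are placeholders:

    # Only activity stream entries matching "update" for workflow job 99.
    curl -s -H "Authorization: Bearer $TOKEN" \
      "https://controller.example.com/api/v2/workflow_jobs/99/activity_stream/?search=update"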
The response will include the following field:\n\n* `can_cancel`: Indicates whether this workflow job is in a state that can\n be canceled (boolean, read-only)\n\nMake a POST request to this endpoint to submit a request to cancel a pending\nor running workflow job. The response status code will be 202 if the\nrequest to cancel was successfully submitted, or 405 if the workflow job\ncannot be canceled.", + "operationId": "api_workflow_jobs_cancel_read", + "parameters": [], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobCancel" + } + } + }, + "summary": "Make a GET request to this resource to determine if the workflow job can be", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "canceled. The response will include the following field:\n\n* `can_cancel`: Indicates whether this workflow job is in a state that can\n be canceled (boolean, read-only)\n\nMake a POST request to this endpoint to submit a request to cancel a pending\nor running workflow job. The response status code will be 202 if the\nrequest to cancel was successfully submitted, or 405 if the workflow job\ncannot be canceled.", + "operationId": "api_workflow_jobs_cancel_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/WorkflowJobCancel" + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/WorkflowJobCancel" + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make a GET request to this resource to determine if the workflow job can be", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_jobs/{id}/labels/": { + "get": { + "description": "labels associated with the selected\nworkflow job.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of labels\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more label records. \n\n## Results\n\nEach label data structure includes the following fields:\n\n* `id`: Database ID for this label. (integer)\n* `type`: Data type for this label. (choice)\n* `url`: URL for this label. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this label was created. (datetime)\n* `modified`: Timestamp when this label was last modified. (datetime)\n* `name`: Name of this label. (string)\n* `organization`: Organization this label belongs to. 
(id)\n\n\n\n## Sorting\n\nTo specify that labels are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_jobs_labels_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Label" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_jobs/{id}/notifications/": { + "get": { + "description": "notifications associated with the selected\nworkflow job.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of notifications\nfound for the given query. The `next` and `previous` fields provides links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more notification records. \n\n## Results\n\nEach notification data structure includes the following fields:\n\n* `id`: Database ID for this notification. (integer)\n* `type`: Data type for this notification. (choice)\n* `url`: URL for this notification. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this notification was created. (datetime)\n* `modified`: Timestamp when this notification was last modified. 
(datetime)\n* `notification_template`: (id)\n* `error`: (string)\n* `status`: (choice)\n - `pending`: Pending\n - `successful`: Successful\n - `failed`: Failed\n* `notifications_sent`: (integer)\n* `notification_type`: (choice)\n - `email`: Email\n - `grafana`: Grafana\n - `irc`: IRC\n - `mattermost`: Mattermost\n - `pagerduty`: Pagerduty\n - `rocketchat`: Rocket.Chat\n - `slack`: Slack\n - `twilio`: Twilio\n - `webhook`: Webhook\n* `recipients`: (string)\n* `subject`: (string)\n* `body`: Notification body (json)\n\n\n\n## Sorting\n\nTo specify that notifications are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_jobs_notifications_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Notification" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + }, + "/api/v2/workflow_jobs/{id}/relaunch/": { + "get": { + "description": "\nIf successful, the response status code will be 201 and serialized data of the new workflow job will be returned.", + "operationId": "api_workflow_jobs_relaunch_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + 
"next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/Empty" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a POST request to this endpoint to launch a workflow job identical to the parent workflow job. This will spawn jobs, project updates, or inventory updates based on the unified job templates referenced in the workflow nodes in the workflow job. No POST data is accepted for this action.", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "post": { + "description": "\nIf successful, the response status code will be 201 and serialized data of the new workflow job will be returned.", + "operationId": "api_workflow_jobs_relaunch_create", + "parameters": [ + { + "in": "body", + "name": "data", + "required": true, + "schema": { + "$ref": "#/definitions/Empty" + } + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "", + "examples": { + "application/json": { + "allow_simultaneous": false, + "canceled_on": null, + "created": "2018-02-01T08:00:00.000000Z", + "description": "", + "elapsed": 0.0, + "extra_vars": "", + "failed": false, + "finished": null, + "id": 2, + "inventory": null, + "is_sliced_job": false, + "job_args": "", + "job_cwd": "", + "job_env": {}, + "job_explanation": "", + "job_tags": null, + "job_template": null, + "launch_type": "relaunch", + "launched_by": {}, + "limit": null, + "modified": "2018-02-01T08:00:00.000000Z", + "name": "test_workflow", + "related": { + "activity_stream": "/api/v2/workflow_jobs/2/activity_stream/", + "cancel": "/api/v2/workflow_jobs/2/cancel/", + "labels": "/api/v2/workflow_jobs/2/labels/", + "notifications": "/api/v2/workflow_jobs/2/notifications/", + "relaunch": "/api/v2/workflow_jobs/2/relaunch/", + "unified_job_template": "/api/v2/workflow_job_templates/1/", + "workflow_job_template": "/api/v2/workflow_job_templates/1/", + "workflow_nodes": "/api/v2/workflow_jobs/2/workflow_nodes/" + }, + "result_traceback": "", + "scm_branch": null, + "skip_tags": null, + "started": null, + "status": "pending", + "summary_fields": { + "labels": { + "count": 0, + "results": [] + }, + "unified_job_template": { + "description": "", + "id": 1, + "name": "test_workflow", + "unified_job_type": "workflow_job" + }, + "user_capabilities": { + "delete": true, + "start": true + }, + "workflow_job_template": { + "description": "", + "id": 1, + "name": "test_workflow" + } + }, + "type": "workflow_job", + "unified_job_template": 1, + "url": "/api/v2/workflow_jobs/2/", + "webhook_credential": null, + "webhook_guid": "", + "webhook_service": "", + "work_unit_id": null, + "workflow_job_template": 1 + } + }, + "schema": { + "$ref": "#/definitions/Empty" + } + }, + "400": { + "examples": { + "application/json": { + "detail": "Cannot relaunch slice workflow job orphaned from job template." + } + } + }, + "403": { + "examples": { + "application/json": { + "detail": "You do not have permission to perform this action." + } + } + } + }, + "summary": "Make a POST request to this endpoint to launch a workflow job identical to the parent workflow job. 
This will spawn jobs, project updates, or inventory updates based on the unified job templates referenced in the workflow nodes in the workflow job. No POST data is accepted for this action.", + "tags": [ + "api" + ] + } + }, + "/api/v2/workflow_jobs/{id}/workflow_nodes/": { + "get": { + "description": "workflow job nodes associated with the selected\nworkflow job.\n\nThe resulting data structure contains:\n\n {\n \"count\": 99,\n \"next\": null,\n \"previous\": null,\n \"results\": [\n ...\n ]\n }\n\nThe `count` field indicates the total number of workflow job nodes\nfound for the given query. The `next` and `previous` fields provide links to\nadditional results if there are more than will fit on a single page. The\n`results` list contains zero or more workflow job node records. \n\n## Results\n\nEach workflow job node data structure includes the following fields:\n\n* `id`: Database ID for this workflow job node. (integer)\n* `type`: Data type for this workflow job node. (choice)\n* `url`: URL for this workflow job node. (string)\n* `related`: Data structure with URLs of related resources. (object)\n* `summary_fields`: Data structure with name/description for related resources. The output for some objects may be limited for performance reasons. (object)\n* `created`: Timestamp when this workflow job node was created. (datetime)\n* `modified`: Timestamp when this workflow job node was last modified. (datetime)\n* `extra_data`: (json)\n* `inventory`: Inventory applied as a prompt, assuming job template prompts for inventory (id)\n* `scm_branch`: (string)\n* `job_type`: (choice)\n - `None`: ---------\n - `\"\"`: ---------\n - `run`: Run\n - `check`: Check\n* `job_tags`: (string)\n* `skip_tags`: (string)\n* `limit`: (string)\n* `diff_mode`: (boolean)\n* `verbosity`: (choice)\n - `None`: ---------\n - `0`: 0 (Normal)\n - `1`: 1 (Verbose)\n - `2`: 2 (More Verbose)\n - `3`: 3 (Debug)\n - `4`: 4 (Connection Debug)\n - `5`: 5 (WinRM Debug)\n* `execution_environment`: The container image to be used for execution. (id)\n* `forks`: (integer)\n* `job_slice_count`: (integer)\n* `timeout`: (integer)\n* `job`: (id)\n* `workflow_job`: (id)\n* `unified_job_template`: (id)\n* `success_nodes`: (field)\n* `failure_nodes`: (field)\n* `always_nodes`: (field)\n* `all_parents_must_converge`: If enabled, the node will only run if all of the parent nodes have met the criteria to reach this node (boolean)\n* `do_not_run`: Indicates that a job will not be created when True. Workflow runtime semantics will mark this True if the node is in a path that will decidedly not run. A value of False means the node might still run. (boolean)\n* `identifier`: An identifier corresponding to the workflow job template node that this node was created from. (string)\n\n\n\n## Sorting\n\nTo specify that workflow job nodes are returned in a particular\norder, use the `order_by` query string parameter on the GET request.\n\n ?order_by=name\n\nPrefix the field name with a dash `-` to sort in reverse:\n\n ?order_by=-name\n\nMultiple sorting fields may be specified by separating the field names with a\ncomma `,`:\n\n ?order_by=name,some_other_field\n\n## Pagination\n\nUse the `page_size` query string parameter to change the number of results\nreturned for each request. 
Use the `page` query string parameter to retrieve\na particular page of results.\n\n ?page_size=100&page=2\n\nThe `previous` and `next` links returned with the results will set these query\nstring parameters automatically.\n\n## Searching\n\nUse the `search` query string parameter to perform a case-insensitive search\nwithin all designated text fields of a model.\n\n ?search=findme\n\n(_Added in Ansible Tower 3.1.0_) Search across related fields:\n\n ?related__search=findme", + "operationId": "api_workflow_jobs_workflow_nodes_list", + "parameters": [ + { + "description": "A search term.", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "A page number within the paginated result set.", + "in": "query", + "name": "page", + "required": false, + "type": "integer" + }, + { + "description": "Number of results to return per page.", + "in": "query", + "name": "page_size", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "properties": { + "count": { + "type": "integer" + }, + "next": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "previous": { + "format": "uri", + "type": "string", + "x-nullable": true + }, + "results": { + "items": { + "$ref": "#/definitions/WorkflowJobNodeList" + }, + "type": "array" + } + }, + "required": [ + "count", + "results" + ], + "type": "object" + } + } + }, + "summary": "Make a GET request to this resource to retrieve a list of", + "tags": [ + "api" + ] + }, + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ] + } + }, + "produces": [ + "application/json" + ], + "schemes": [ + "https" + ], + "security": [ + { + "Basic": [] + } + ], + "securityDefinitions": { + "Basic": { + "type": "basic" + } + }, + "swagger": "2.0" +} \ No newline at end of file diff --git a/downstream/aap-common/apache-2.0-license.adoc b/downstream/aap-common/apache-2.0-license.adoc new file mode 100644 index 0000000000..e40eeb1791 --- /dev/null +++ b/downstream/aap-common/apache-2.0-license.adoc @@ -0,0 +1,57 @@ +[id="apache-2.0-license"] + +[.text-center] +*Apache License* +[.text-center] +Version 2.0, January 2004 +[.text-center] +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +*1. Definitions.* + +*"License"* shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +*"Licensor"* shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +*"Legal Entity"* shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, *"control"* means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +*"You"* (or *"Your"*) shall mean an individual or Legal Entity exercising permissions granted by this License. + +*"Source"* form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
+ +*"Object"* form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +*"Work"* shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +*"Derivative Works"* shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +*"Contribution"* shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, *"submitted"* means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as *"Not a Contribution."* + +*"Contributor"* shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +*2. Grant of Copyright License.* Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +*3. Grant of Patent License.* Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +*4. 
Redistribution.* You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +.. You must give any other recipients of the Work or Derivative Works a copy of this License; and +.. You must cause any modified files to carry prominent notices stating that You changed the files; and +.. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and +.. If the Work includes a *"NOTICE"* text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +*5. Submission of Contributions.* Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +*6. Trademarks.* This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +*7. Disclaimer of Warranty.* Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +*8. 
Limitation of Liability.* In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +*9. Accepting Warranty or Additional Liability.* While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/downstream/aap-common/external-site-disclaimer.adoc b/downstream/aap-common/external-site-disclaimer.adoc index 020020f7eb..b33617837b 100644 --- a/downstream/aap-common/external-site-disclaimer.adoc +++ b/downstream/aap-common/external-site-disclaimer.adoc @@ -12,9 +12,9 @@ // The following example adds a symlink to snippets from a hub title // $ cd /titles/hub/getting-started // $ ln -s ../../../snippets ./snippets -// +// // Including the file in a document // Add the following in the file where you want the text to be included: // include::snippets/external-site-disclaimer.adoc[] - -*Disclaimer*: Links contained in this note to external website(s) are provided for convenience only. Red Hat has not reviewed the links and is not responsible for the content or its availability. The inclusion of any link to an external website does not imply endorsement by Red Hat of the website or their entities, products or services. You agree that Red Hat is not responsible or liable for any loss or expenses that may result due to your use of (or reliance on) the external site or content. +//[ddacosta] generalized this to be usable in broader applications. +*Disclaimer*: Links contained in this information to external website(s) are provided for convenience only. Red Hat has not reviewed the links and is not responsible for the content or its availability. The inclusion of any link to an external website does not imply endorsement by Red Hat of the website or their entities, products or services. You agree that Red Hat is not responsible or liable for any loss or expenses that may result due to your use of (or reliance on) the external site or content. 
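The workflow job endpoints documented in controller-api/swagger.json above can be exercised directly against a controller. The following is a minimal sketch only, not part of the shipped docs: the host `controller.example.com`, the `admin:password` credentials, and workflow job ID `2` are placeholder assumptions, while the paths, query parameters, and HTTP basic authentication scheme come from the spec itself.
----
#!/bin/bash

# Minimal sketch of calling the workflow job endpoints described in
# controller-api/swagger.json. The host, credentials, and job ID are
# placeholders -- substitute values for your own environment.
host=https://controller.example.com
auth=admin:password

# List notifications for workflow job 2, newest first and paginated, using
# the order_by, page_size, and page query parameters the spec documents.
curl -s -u "$auth" "$host/api/v2/workflow_jobs/2/notifications/?order_by=-created&page_size=100&page=1"

# Relaunch workflow job 2. The spec states that no POST data is accepted and
# that a successful relaunch returns 201 with the new workflow job.
curl -s -u "$auth" -X POST "$host/api/v2/workflow_jobs/2/relaunch/"
----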
diff --git a/downstream/aap-common/gplv3-license-text.adoc b/downstream/aap-common/gplv3-license-text.adoc new file mode 100644 index 0000000000..8ee4be0676 --- /dev/null +++ b/downstream/aap-common/gplv3-license-text.adoc @@ -0,0 +1,231 @@ +[id="gplv3-license-text"] + +[.text-center] +*GNU GENERAL PUBLIC LICENSE* +[.text-center] +Version 3, 29 June 2007 + +Copyright © 2007 Free Software Foundation, Inc. <https://fsf.org/> + +Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. + +[discrete] +==== Preamble +The GNU General Public License is a free, copyleft license for software and other kinds of works. + +The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. + +To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. + +For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. + +Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. + +For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. + +Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. + +Finally, every program is threatened constantly by software patents. 
States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. + +The precise terms and conditions for copying, distribution and modification follow. + +[discrete] +==== TERMS AND CONDITIONS + +.0. Definitions. +“This License” refers to version 3 of the GNU General Public License. + +“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. + +“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. + +To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. + +A “covered work” means either the unmodified Program or a work based on the Program. + +To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. + +To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. + +An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. + +.1. Source Code. +The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. + +A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. + +The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. 
+ +The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same work. + +.2. Basic Permissions. +All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. + +Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. + +.3. Protecting Users' Legal Rights From Anti-Circumvention Law. +No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. + +When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. + +.4. Conveying Verbatim Copies. 
+You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. + +.5. Conveying Modified Source Versions. +You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: + +* a) The work must carry prominent notices stating that you modified it, and giving a relevant date. +* b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. +* c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. +* d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. +A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. + +.6. Conveying Non-Source Forms. +You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: + +* a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
+* b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. +* c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. +* d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. +* e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. + +A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. + +A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. + +“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. 
+ +If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). + +The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. + +Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. + +.7. Additional Terms. +“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
+ +Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: + +* a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or +* b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or +* c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or +* d) Limiting the use for publicity purposes of names of licensors or authors of the material; or +* e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or +* f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. + +All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. + +If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. + +.8. Termination. +You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). + +However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. + +Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. + +Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. + +.9. Acceptance Not Required for Having Copies. +You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. + +.10. Automatic Licensing of Downstream Recipients. +Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. + +An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. + +.11. Patents. +A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. + +A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. + +In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
+ +If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. + +A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. + +.12. No Surrender of Others' Freedom. +If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. + +.13. Use with the GNU Affero General Public License. +Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. + +.14. Revised Versions of this License. +The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. + +If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. + +Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. + +.15. Disclaimer of Warranty. +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +.16. Limitation of Liability. +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +.17. Interpretation of Sections 15 and 16. +If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. + +END OF TERMS AND CONDITIONS +[discrete] +==== How to Apply These Terms to Your New Programs +If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the “copyright” line and a pointer to where the full notice is found. +---- + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. +---- +Also add information on how to contact you by electronic and paper mail. + +If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: +---- + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. +---- +The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an “about box”. + +You should also get your employer (if you work as a programmer) or school, if any, to sign a “copyright disclaimer” for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <https://www.gnu.org/licenses/>. + +The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <https://www.gnu.org/philosophy/why-not-lgpl.html>. + diff --git a/downstream/aap-common/providing-feedback.adoc b/downstream/aap-common/providing-feedback.adoc index ef5a39c0e3..9fb541ab69 100644 --- a/downstream/aap-common/providing-feedback.adoc +++ b/downstream/aap-common/providing-feedback.adoc @@ -3,5 +3,5 @@ [id="providing-feedback"] = Providing feedback on Red Hat documentation -If you have a suggestion to improve this documentation, or find an error, please contact technical support at link:https://access.redhat.com[https://access.redhat.com] to create an issue on the {PlatformNameShort} Jira project using the *docs-product* component. +If you have a suggestion to improve this documentation, or find an error, you can contact technical support at link:https://access.redhat.com[https://access.redhat.com] to open a request. 
diff --git a/downstream/assemblies/platform/assembly-attaching-subscriptions.adoc b/downstream/archive/archived-assemblies/platform/assembly-attaching-subscriptions.adoc similarity index 93% rename from downstream/assemblies/platform/assembly-attaching-subscriptions.adoc rename to downstream/archive/archived-assemblies/platform/assembly-attaching-subscriptions.adoc index ac588ac5f1..81b9a25f79 100644 --- a/downstream/assemblies/platform/assembly-attaching-subscriptions.adoc +++ b/downstream/archive/archived-assemblies/platform/assembly-attaching-subscriptions.adoc @@ -1,4 +1,4 @@ - +// emurtoug removed this assembly from the Planning guide to avoid duplication of subscription content added to Access management and authentication [id="proc-attaching-subscriptions_{context}"] diff --git a/downstream/archive/archived-snippets/known-issue-container-content-syncing.adoc b/downstream/archive/archived-snippets/known-issue-container-content-syncing.adoc new file mode 100644 index 0000000000..0a9449e86c --- /dev/null +++ b/downstream/archive/archived-snippets/known-issue-container-content-syncing.adoc @@ -0,0 +1 @@ +* When installing the growth topology for the {PlatformNameShort} 2.5 containerized setup bundle, you must disable content syncing, which is enabled by default. To disable this feature, set the `hub_seed_collections` variable in the inventory file to `false`. See link:{URLTopologies}/container-topologies#cont-a-env-a[Container growth topology] for a sample inventory file, and see link:{URLContainerizedInstall}/appendix-inventory-files-vars#ref-hub-variables[{HubNameStart} variables] for more information about this inventory file variable. \ No newline at end of file diff --git a/downstream/titles/analytics/automation-savings-planner/docinfo.xml b/downstream/archive/archived-titles/analytics/automation-savings-planner/docinfo.xml similarity index 100% rename from downstream/titles/analytics/automation-savings-planner/docinfo.xml rename to downstream/archive/archived-titles/analytics/automation-savings-planner/docinfo.xml diff --git a/downstream/titles/analytics/automation-savings-planner/master.adoc b/downstream/archive/archived-titles/analytics/automation-savings-planner/master.adoc similarity index 80% rename from downstream/titles/analytics/automation-savings-planner/master.adoc rename to downstream/archive/archived-titles/analytics/automation-savings-planner/master.adoc index f912396af9..c70185089a 100644 --- a/downstream/titles/analytics/automation-savings-planner/master.adoc +++ b/downstream/archive/archived-titles/analytics/automation-savings-planner/master.adoc @@ -1,3 +1,5 @@ +// This title has been archived due to consolidation of separate AA docs. 
See AAP-26519 + :imagesdir: images :experimental: :toclevels: 4 diff --git a/downstream/titles/analytics/automation-savings/docinfo.xml b/downstream/archive/archived-titles/analytics/automation-savings/docinfo.xml similarity index 100% rename from downstream/titles/analytics/automation-savings/docinfo.xml rename to downstream/archive/archived-titles/analytics/automation-savings/docinfo.xml diff --git a/downstream/titles/analytics/automation-savings/master.adoc b/downstream/archive/archived-titles/analytics/automation-savings/master.adoc similarity index 79% rename from downstream/titles/analytics/automation-savings/master.adoc rename to downstream/archive/archived-titles/analytics/automation-savings/master.adoc index 43e0419f26..bf5872c058 100644 --- a/downstream/titles/analytics/automation-savings/master.adoc +++ b/downstream/archive/archived-titles/analytics/automation-savings/master.adoc @@ -1,4 +1,4 @@ -// This assembly is included in the following assemblies: +// This title has been archived due to consolidation of separate AA docs. See AAP-26519 // :imagesdir: images diff --git a/downstream/titles/analytics/job-explorer/docinfo.xml b/downstream/archive/archived-titles/analytics/job-explorer/docinfo.xml similarity index 89% rename from downstream/titles/analytics/job-explorer/docinfo.xml rename to downstream/archive/archived-titles/analytics/job-explorer/docinfo.xml index e616023e8e..ea054d4f0d 100644 --- a/downstream/titles/analytics/job-explorer/docinfo.xml +++ b/downstream/archive/archived-titles/analytics/job-explorer/docinfo.xml @@ -1,4 +1,4 @@ -Evaluating your automation controller job runs using the job explorer +Using automation analytics Red Hat Ansible Automation Platform 2.5 Review jobs and templates in greater detail by applying filters and sorting by attributes diff --git a/downstream/titles/analytics/job-explorer/master.adoc b/downstream/archive/archived-titles/analytics/job-explorer/master.adoc similarity index 68% rename from downstream/titles/analytics/job-explorer/master.adoc rename to downstream/archive/archived-titles/analytics/job-explorer/master.adoc index cf7b70776d..fbdf6aca6a 100644 --- a/downstream/titles/analytics/job-explorer/master.adoc +++ b/downstream/archive/archived-titles/analytics/job-explorer/master.adoc @@ -1,3 +1,5 @@ +// This title has been archived due to consolidation of separate AA docs. See AAP-26519 + :imagesdir: images :experimental: :toclevels: 4 @@ -7,7 +9,7 @@ include::attributes/attributes.adoc[] :analytics_automation_savings: [[analytics_automation_savings]] -= Evaluating your automation controller job runs using the job explorer += Using automation analytics include::{Boilerplate}[] diff --git a/downstream/titles/analytics/reports/docinfo.xml b/downstream/archive/archived-titles/analytics/reports/docinfo.xml similarity index 100% rename from downstream/titles/analytics/reports/docinfo.xml rename to downstream/archive/archived-titles/analytics/reports/docinfo.xml diff --git a/downstream/titles/analytics/reports/master.adoc b/downstream/archive/archived-titles/analytics/reports/master.adoc similarity index 76% rename from downstream/titles/analytics/reports/master.adoc rename to downstream/archive/archived-titles/analytics/reports/master.adoc index 99cb395934..344fbef221 100644 --- a/downstream/titles/analytics/reports/master.adoc +++ b/downstream/archive/archived-titles/analytics/reports/master.adoc @@ -1,3 +1,5 @@ +// This title has been archived due to consolidation of separate AA docs. 
See AAP-26519 + :imagesdir: images :experimental: :toclevels: 4 diff --git a/downstream/titles/controller/controller-getting-started/docinfo.xml b/downstream/archive/archived-titles/controller/controller-getting-started/docinfo.xml similarity index 100% rename from downstream/titles/controller/controller-getting-started/docinfo.xml rename to downstream/archive/archived-titles/controller/controller-getting-started/docinfo.xml diff --git a/downstream/titles/controller/controller-getting-started/master.adoc b/downstream/archive/archived-titles/controller/controller-getting-started/master.adoc similarity index 100% rename from downstream/titles/controller/controller-getting-started/master.adoc rename to downstream/archive/archived-titles/controller/controller-getting-started/master.adoc diff --git a/downstream/archive/archived-titles/controller/docinfo.xml b/downstream/archive/archived-titles/controller/controller-guide/docinfo.xml similarity index 100% rename from downstream/archive/archived-titles/controller/docinfo.xml rename to downstream/archive/archived-titles/controller/controller-guide/docinfo.xml diff --git a/downstream/archive/archived-titles/controller/master.adoc b/downstream/archive/archived-titles/controller/controller-guide/master.adoc similarity index 100% rename from downstream/archive/archived-titles/controller/master.adoc rename to downstream/archive/archived-titles/controller/controller-guide/master.adoc diff --git a/downstream/titles/dev-guide/aap-common b/downstream/archive/archived-titles/dev-guide/aap-common similarity index 100% rename from downstream/titles/dev-guide/aap-common rename to downstream/archive/archived-titles/dev-guide/aap-common diff --git a/downstream/titles/dev-guide/attributes b/downstream/archive/archived-titles/dev-guide/attributes similarity index 100% rename from downstream/titles/dev-guide/attributes rename to downstream/archive/archived-titles/dev-guide/attributes diff --git a/downstream/titles/dev-guide/core b/downstream/archive/archived-titles/dev-guide/core similarity index 100% rename from downstream/titles/dev-guide/core rename to downstream/archive/archived-titles/dev-guide/core diff --git a/downstream/titles/dev-guide/dev-guide b/downstream/archive/archived-titles/dev-guide/dev-guide similarity index 100% rename from downstream/titles/dev-guide/dev-guide rename to downstream/archive/archived-titles/dev-guide/dev-guide diff --git a/downstream/titles/dev-guide/docinfo.xml b/downstream/archive/archived-titles/dev-guide/docinfo.xml similarity index 87% rename from downstream/titles/dev-guide/docinfo.xml rename to downstream/archive/archived-titles/dev-guide/docinfo.xml index d001122158..d5bf3f27ad 100644 --- a/downstream/titles/dev-guide/docinfo.xml +++ b/downstream/archive/archived-titles/dev-guide/docinfo.xml @@ -1,7 +1,7 @@ Red Hat Ansible Automation Platform creator guide Red Hat Ansible Automation Platform 2.5 -Learn to create automation content with Ansible +Create automation content with Ansible This guide helps developers learn how to use Ansible to create content for automation. 
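The creator guide abstract above describes Ansible automation content only in general terms. As a minimal sketch of what such content looks like, assuming a hypothetical `webservers` host group and the `nginx` package (neither is taken from this repository):

[source,yaml]
----
# Minimal illustrative playbook; the host group and package are hypothetical.
- name: Ensure a web server is present
  hosts: webservers
  become: true
  tasks:
    - name: Install nginx
      ansible.builtin.package:
        name: nginx
        state: present
----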
diff --git a/downstream/titles/dev-guide/images b/downstream/archive/archived-titles/dev-guide/images similarity index 100% rename from downstream/titles/dev-guide/images rename to downstream/archive/archived-titles/dev-guide/images diff --git a/downstream/titles/dev-guide/master.adoc b/downstream/archive/archived-titles/dev-guide/master.adoc similarity index 100% rename from downstream/titles/dev-guide/master.adoc rename to downstream/archive/archived-titles/dev-guide/master.adoc diff --git a/downstream/titles/dev-guide/navigator b/downstream/archive/archived-titles/dev-guide/navigator similarity index 100% rename from downstream/titles/dev-guide/navigator rename to downstream/archive/archived-titles/dev-guide/navigator diff --git a/downstream/titles/eda/eda-getting-started-guide/docinfo.xml b/downstream/archive/archived-titles/eda/eda-getting-started-guide/docinfo.xml similarity index 85% rename from downstream/titles/eda/eda-getting-started-guide/docinfo.xml rename to downstream/archive/archived-titles/eda/eda-getting-started-guide/docinfo.xml index de5a62f77c..ddcac75870 100644 --- a/downstream/titles/eda/eda-getting-started-guide/docinfo.xml +++ b/downstream/archive/archived-titles/eda/eda-getting-started-guide/docinfo.xml @@ -1,7 +1,7 @@ -Getting started with Event-Driven Ansible guide +Getting started with Event-Driven Ansible Red Hat Ansible Automation Platform 2.5 -Learn about the benefits and how to get started using Event-Driven Ansible. +Learn about the benefits and how to get started using Event-Driven Ansible Event-Driven Ansible is a new way to enhance and expand automation by improving IT speed and agility while enabling consistency and resilience. This feature is designed for simplicity and flexibility. diff --git a/downstream/titles/eda/eda-getting-started-guide/master.adoc b/downstream/archive/archived-titles/eda/eda-getting-started-guide/master.adoc similarity index 93% rename from downstream/titles/eda/eda-getting-started-guide/master.adoc rename to downstream/archive/archived-titles/eda/eda-getting-started-guide/master.adoc index d70e26df28..6f96c67f34 100644 --- a/downstream/titles/eda/eda-getting-started-guide/master.adoc +++ b/downstream/archive/archived-titles/eda/eda-getting-started-guide/master.adoc @@ -9,7 +9,7 @@ include::attributes/attributes.adoc[] // Book Title -= Getting started with Event-Driven Ansible guide += Getting started with Event-Driven Ansible Thank you for your interest in {EDAname}. {EDAname} is a new way to enhance and expand automation. It helps teams automate decision-making and improve IT speed and agility. 
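To make the event-driven flow described in the getting-started abstract concrete, the following is a minimal rulebook sketch; the webhook source, the alert condition, and the `remediate.yml` playbook are illustrative assumptions rather than content from this guide:

[source,yaml]
----
# Illustrative rulebook: run a playbook when a matching webhook event arrives.
# The port, condition, and playbook name are hypothetical.
- name: Respond to monitoring alerts
  hosts: all
  sources:
    - ansible.eda.webhook:
        host: 0.0.0.0
        port: 5000
  rules:
    - name: Remediate critical alerts
      condition: event.payload.severity == "critical"
      action:
        run_playbook:
          name: remediate.yml
----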
diff --git a/downstream/titles/hub/getting-started/docinfo.xml b/downstream/archive/archived-titles/hub/getting-started/docinfo.xml similarity index 100% rename from downstream/titles/hub/getting-started/docinfo.xml rename to downstream/archive/archived-titles/hub/getting-started/docinfo.xml diff --git a/downstream/titles/hub/getting-started/master.adoc b/downstream/archive/archived-titles/hub/getting-started/master.adoc similarity index 100% rename from downstream/titles/hub/getting-started/master.adoc rename to downstream/archive/archived-titles/hub/getting-started/master.adoc diff --git a/downstream/titles/release-notes/topics/installer-24-11.adoc b/downstream/archive/archived-titles/release-notes/async/installer-24-11.adoc similarity index 100% rename from downstream/titles/release-notes/topics/installer-24-11.adoc rename to downstream/archive/archived-titles/release-notes/async/installer-24-11.adoc diff --git a/downstream/titles/release-notes/topics/installer-24-12.adoc b/downstream/archive/archived-titles/release-notes/async/installer-24-12.adoc similarity index 100% rename from downstream/titles/release-notes/topics/installer-24-12.adoc rename to downstream/archive/archived-titles/release-notes/async/installer-24-12.adoc diff --git a/downstream/titles/release-notes/topics/installer-24-13.adoc b/downstream/archive/archived-titles/release-notes/async/installer-24-13.adoc similarity index 100% rename from downstream/titles/release-notes/topics/installer-24-13.adoc rename to downstream/archive/archived-titles/release-notes/async/installer-24-13.adoc diff --git a/downstream/titles/release-notes/topics/installer-24-14.adoc b/downstream/archive/archived-titles/release-notes/async/installer-24-14.adoc similarity index 100% rename from downstream/titles/release-notes/topics/installer-24-14.adoc rename to downstream/archive/archived-titles/release-notes/async/installer-24-14.adoc diff --git a/downstream/titles/release-notes/topics/installer-24-21.adoc b/downstream/archive/archived-titles/release-notes/async/installer-24-21.adoc similarity index 100% rename from downstream/titles/release-notes/topics/installer-24-21.adoc rename to downstream/archive/archived-titles/release-notes/async/installer-24-21.adoc diff --git a/downstream/titles/release-notes/topics/installer-24-22.adoc b/downstream/archive/archived-titles/release-notes/async/installer-24-22.adoc similarity index 100% rename from downstream/titles/release-notes/topics/installer-24-22.adoc rename to downstream/archive/archived-titles/release-notes/async/installer-24-22.adoc diff --git a/downstream/titles/release-notes/topics/installer-24-23.adoc b/downstream/archive/archived-titles/release-notes/async/installer-24-23.adoc similarity index 100% rename from downstream/titles/release-notes/topics/installer-24-23.adoc rename to downstream/archive/archived-titles/release-notes/async/installer-24-23.adoc diff --git a/downstream/titles/release-notes/topics/installer-24-24.adoc b/downstream/archive/archived-titles/release-notes/async/installer-24-24.adoc similarity index 100% rename from downstream/titles/release-notes/topics/installer-24-24.adoc rename to downstream/archive/archived-titles/release-notes/async/installer-24-24.adoc diff --git a/downstream/titles/release-notes/topics/installer-24-6.adoc b/downstream/archive/archived-titles/release-notes/async/installer-24-6.adoc similarity index 100% rename from downstream/titles/release-notes/topics/installer-24-6.adoc rename to 
downstream/archive/archived-titles/release-notes/async/installer-24-6.adoc diff --git a/downstream/titles/release-notes/topics/installer-24-61.adoc b/downstream/archive/archived-titles/release-notes/async/installer-24-61.adoc similarity index 100% rename from downstream/titles/release-notes/topics/installer-24-61.adoc rename to downstream/archive/archived-titles/release-notes/async/installer-24-61.adoc diff --git a/downstream/titles/release-notes/topics/installer-24-62.adoc b/downstream/archive/archived-titles/release-notes/async/installer-24-62.adoc similarity index 100% rename from downstream/titles/release-notes/topics/installer-24-62.adoc rename to downstream/archive/archived-titles/release-notes/async/installer-24-62.adoc diff --git a/downstream/titles/release-notes/topics/installer-24-7.adoc b/downstream/archive/archived-titles/release-notes/async/installer-24-7.adoc similarity index 100% rename from downstream/titles/release-notes/topics/installer-24-7.adoc rename to downstream/archive/archived-titles/release-notes/async/installer-24-7.adoc diff --git a/downstream/titles/release-notes/topics/rpm-24-2.adoc b/downstream/archive/archived-titles/release-notes/async/rpm-24-2.adoc similarity index 100% rename from downstream/titles/release-notes/topics/rpm-24-2.adoc rename to downstream/archive/archived-titles/release-notes/async/rpm-24-2.adoc diff --git a/downstream/titles/release-notes/topics/rpm-24-3.adoc b/downstream/archive/archived-titles/release-notes/async/rpm-24-3.adoc similarity index 100% rename from downstream/titles/release-notes/topics/rpm-24-3.adoc rename to downstream/archive/archived-titles/release-notes/async/rpm-24-3.adoc diff --git a/downstream/titles/release-notes/topics/rpm-24-4.adoc b/downstream/archive/archived-titles/release-notes/async/rpm-24-4.adoc similarity index 100% rename from downstream/titles/release-notes/topics/rpm-24-4.adoc rename to downstream/archive/archived-titles/release-notes/async/rpm-24-4.adoc diff --git a/downstream/titles/release-notes/topics/rpm-24-5.adoc b/downstream/archive/archived-titles/release-notes/async/rpm-24-5.adoc similarity index 100% rename from downstream/titles/release-notes/topics/rpm-24-5.adoc rename to downstream/archive/archived-titles/release-notes/async/rpm-24-5.adoc diff --git a/downstream/titles/release-notes/topics/rpm-24-6.adoc b/downstream/archive/archived-titles/release-notes/async/rpm-24-6.adoc similarity index 100% rename from downstream/titles/release-notes/topics/rpm-24-6.adoc rename to downstream/archive/archived-titles/release-notes/async/rpm-24-6.adoc diff --git a/downstream/titles/release-notes/topics/rpm-24-7.adoc b/downstream/archive/archived-titles/release-notes/async/rpm-24-7.adoc similarity index 100% rename from downstream/titles/release-notes/topics/rpm-24-7.adoc rename to downstream/archive/archived-titles/release-notes/async/rpm-24-7.adoc diff --git a/downstream/assemblies/aap-hardening/assembly-aap-security-enabling.adoc b/downstream/assemblies/aap-hardening/assembly-aap-security-enabling.adoc deleted file mode 100644 index 618334d889..0000000000 --- a/downstream/assemblies/aap-hardening/assembly-aap-security-enabling.adoc +++ /dev/null @@ -1,20 +0,0 @@ -ifdef::context[:parent-context: {context}] - -[id="aap-security-enabling"] -= {PlatformNameShort} as a security enabling tool - -:context: aap-security-enabling - -[role="_abstract"] - - - -//// -Consider adding a link to future Builder docs here -[role="_additional-resources"] -.Additional resources -* A bulleted list of links to other material 
closely related to the contents of the concept module. -* Currently, modules cannot include xrefs, so you cannot include links to other content in your collection. If you need to link to another assembly, add the xref to the assembly that includes this module. -* For more details on writing concept modules, see the link:https://github.com/redhat-documentation/modular-docs#modular-documentation-reference-guide[Modular Documentation Reference Guide]. -* Use a consistent system for file names, IDs, and titles. For tips, see _Anchor Names and File Names_ in link:https://github.com/redhat-documentation/modular-docs#modular-documentation-reference-guide[Modular Documentation Reference Guide]. -//// \ No newline at end of file diff --git a/downstream/assemblies/aap-hardening/assembly-aap-security-use-cases.adoc b/downstream/assemblies/aap-hardening/assembly-aap-security-use-cases.adoc new file mode 100644 index 0000000000..d1079af972 --- /dev/null +++ b/downstream/assemblies/aap-hardening/assembly-aap-security-use-cases.adoc @@ -0,0 +1,37 @@ +ifdef::context[:parent-context: {context}] + +[id="aap-security-use-cases"] += {PlatformNameShort} security automation use cases + +:context: aap-security-enabling + +[role="_abstract"] + +{PlatformNameShort} provides organizations the opportunity to automate many of the manual tasks required to maintain a strong IT security posture. +Areas where security operations might be automated include security event response and remediation, routine security operations, compliance with security policies and regulations, and security hardening of IT infrastructure. + +include::aap-hardening/con-security-operations-center.adoc[leveloffset=+1] +include::aap-hardening/con-patch-automation-with-aap.adoc[leveloffset=+1] +include::aap-hardening/con-benefits-of-patch-automation.adoc[leveloffset=+2] +include::aap-hardening/con-patching-examples.adoc[leveloffset=+2] +include::aap-hardening/ref-keep-up-to-date.adoc[leveloffset=+3] +include::aap-hardening/ref-install-security-updates.adoc[leveloffset=+3] +include::aap-hardening/ref-specify-package-versions.adoc[leveloffset=+3] +include::aap-hardening/ref-complex-patching-scenarios.adoc[leveloffset=+2] + + + + + + + + +//// +Consider adding a link to future Builder docs here +[role="_additional-resources"] +.Additional resources +* A bulleted list of links to other material closely related to the contents of the concept module. +* Currently, modules cannot include xrefs, so you cannot include links to other content in your collection. If you need to link to another assembly, add the xref to the assembly that includes this module. +* For more details on writing concept modules, see the link:https://github.com/redhat-documentation/modular-docs#modular-documentation-reference-guide[Modular Documentation Reference Guide]. +* Use a consistent system for file names, IDs, and titles. For tips, see _Anchor Names and File Names_ in link:https://github.com/redhat-documentation/modular-docs#modular-documentation-reference-guide[Modular Documentation Reference Guide]. 
+//// \ No newline at end of file diff --git a/downstream/assemblies/aap-hardening/assembly-hardening-aap.adoc b/downstream/assemblies/aap-hardening/assembly-hardening-aap.adoc index a2aa736963..37b99f9c8b 100644 --- a/downstream/assemblies/aap-hardening/assembly-hardening-aap.adoc +++ b/downstream/assemblies/aap-hardening/assembly-hardening-aap.adoc @@ -7,7 +7,8 @@ ifdef::context[:parent-context: {context}] [role="_abstract"] -This guide takes a practical approach to hardening the {PlatformNameShort} security posture, starting with the planning and architecture phase of deployment and then covering specific guidance for the installation phase. As this guide specifically covers {PlatformNameShort} running on Red Hat Enterprise Linux, hardening guidance for Red Hat Enterprise Linux will be covered where it affects the automation platform components. +This guide takes a practical approach to hardening the {PlatformNameShort} security posture, starting with the planning and architecture phase of deployment and then covering specific guidance for the installation phase. +As this guide specifically covers {PlatformNameShort} running on {RHEL}, hardening guidance for {RHEL} is covered where it affects the automation platform components. include::aap-hardening/con-planning-considerations.adoc[leveloffset=+1] include::aap-hardening/ref-architecture.adoc[leveloffset=+2] @@ -16,35 +17,38 @@ include::aap-hardening/con-dns-ntp-service-planning.adoc[leveloffset=+2] include::aap-hardening/ref-dns.adoc[leveloffset=+3] include::aap-hardening/ref-dns-load-balancing.adoc[leveloffset=+3] include::aap-hardening/ref-ntp.adoc[leveloffset=+3] -include::aap-hardening/con-user-authentication-planning.adoc[leveloffset=+2] -include::aap-hardening/ref-automation-controller-authentication.adoc[leveloffset=+3] -include::aap-hardening/ref-private-automation-hub-authentication.adoc[leveloffset=+3] +//include::aap-hardening/con-user-authentication-planning.adoc[leveloffset=+2] +include::aap-hardening/ref-aap-authentication.adoc[leveloffset=+3] +//include::aap-hardening/ref-private-automation-hub-authentication.adoc[leveloffset=+3] include::aap-hardening/con-credential-management-planning.adoc[leveloffset=+2] -include::aap-hardening/ref-automation-controller-operational-secrets.adoc[leveloffset=+3] +include::aap-hardening/ref-aap-operational-secrets.adoc[leveloffset=+3] include::aap-hardening/con-automation-use-secrets.adoc[leveloffset=+3] +include::aap-hardening/con-protect-sensitive-data-no-log.adoc[leveloffset=+3] include::aap-hardening/con-logging-log-capture.adoc[leveloffset=+2] include::aap-hardening/ref-auditing-incident-detection.adoc[leveloffset=+2] include::aap-hardening/con-rhel-host-planning.adoc[leveloffset=+2] include::aap-hardening/con-aap-additional-software.adoc[leveloffset=+3] include::aap-hardening/con-installation.adoc[leveloffset=+1] include::aap-hardening/con-install-secure-host.adoc[leveloffset=+2] -include::aap-hardening/ref-security-variables-install-inventory.adoc[leveloffset=+2] +//include::aap-hardening/ref-security-variables-install-inventory.adoc[leveloffset=+2] include::aap-hardening/proc-install-user-pki.adoc[leveloffset=+2] include::aap-hardening/ref-sensitive-variables-install-inventory.adoc[leveloffset=+2] -include::aap-hardening/con-controller-stig-considerations.adoc[leveloffset=+2] +//include::aap-hardening/con-controller-stig-considerations.adoc[leveloffset=+2] +include::aap-hardening/con-compliance-profile-considerations.adoc[leveloffset=+2] 
include::aap-hardening/proc-fapolicyd.adoc[leveloffset=+3] include::aap-hardening/proc-file-systems-mounted-noexec.adoc[leveloffset=+3] include::aap-hardening/proc-namespaces.adoc[leveloffset=+3] +include::aap-hardening/ref-interactive-session-timeout.adoc[leveloffset=+3] include::aap-hardening/ref-sudo-nopasswd.adoc[leveloffset=+3] include::aap-hardening/ref-initial-configuration.adoc[leveloffset=+1] include::aap-hardening/ref-infrastructure-as-code.adoc[leveloffset=+2] -include::aap-hardening/con-controller-configuration.adoc[leveloffset=+2] +//include::aap-hardening/con-controller-configuration.adoc[leveloffset=+2] include::aap-hardening/proc-configure-centralized-logging.adoc[leveloffset=+3] -include::aap-hardening/proc-configure-external-authentication.adoc[leveloffset=+3] -include::aap-hardening/con-external-credential-vault.adoc[leveloffset=+3] +//include::aap-hardening/proc-configure-external-authentication.adoc[leveloffset=+3] +include::aap-hardening/con-external-credential-vault.adoc[leveloffset=+2] include::aap-hardening/con-day-two-operations.adoc[leveloffset=+1] include::aap-hardening/con-rbac.adoc[leveloffset=+2] include::aap-hardening/ref-updates-upgrades.adoc[leveloffset=+2] -include::aap-hardening/proc-controller-stig-considerations.adoc[leveloffset=+3] +//include::aap-hardening/proc-controller-stig-considerations.adoc[leveloffset=+3] include::aap-hardening/proc-disaster-recovery-operations.adoc[leveloffset=+3] diff --git a/downstream/assemblies/aap-hardening/assembly-intro-to-aap-hardening.adoc b/downstream/assemblies/aap-hardening/assembly-intro-to-aap-hardening.adoc index fb06823779..fd5d9434a1 100644 --- a/downstream/assemblies/aap-hardening/assembly-intro-to-aap-hardening.adoc +++ b/downstream/assemblies/aap-hardening/assembly-intro-to-aap-hardening.adoc @@ -10,15 +10,29 @@ ifdef::context[:parent-context: {context}] This document provides guidance for improving the security posture (referred to as “hardening” throughout this guide) of your {PlatformName} deployment on {RHEL}. -Other deployment targets, such as OpenShift, are not currently within the scope of this guide. {PlatformNameShort} managed services available through cloud service provider marketplaces are also not within the scope of this guide. +The following are not currently within the scope of this guide: -This guide takes a practical approach to hardening the {PlatformNameShort} security posture, starting with the planning and architecture phase of deployment and then covering specific guidance for installation, initial configuration, and day two operations. As this guide specifically covers {PlatformNameShort} running on {RHEL}, hardening guidance for {RHEL} will be covered where it affects the automation platform components. Additional considerations with regards to the Defense Information Systems Agency (DISA) Security Technical Implementation Guides (STIGs) are provided for those organizations that integrate the DISA STIG as a part of their overall security strategy. +* Other deployment targets for {PlatformNameShort}, such as OpenShift. +* {PlatformNameShort} managed services available through cloud service provider marketplaces. +//* Additional considerations with regards to the _Defense Information Systems Agency_ (DISA) _Security Technical Implementation Guides_ (STIGs) [NOTE] ==== -These recommendations do not guarantee security or compliance of your deployment of {PlatformNameShort}. 
You must assess security from the unique requirements of your organization to address specific threats and risks and balance these against implementation factors. +Hardening and compliance for {PlatformNameShort} 2.4 includes additional considerations with regard to the specific _Defense Information Systems Agency_ (DISA) _Security Technical Implementation Guides_ (STIGs) for {ControllerName}, but this guidance does not apply to {PlatformNameShort} {PlatformVers}. +==== + +This guide takes a practical approach to hardening the {PlatformNameShort} security posture, starting with the planning and architecture phase of deployment and then covering specific guidance for installation, initial configuration, and day 2 operations. +As this guide specifically covers {PlatformNameShort} running on {RHEL}, hardening guidance for {RHEL} is covered where it affects the automation platform components. +Additional considerations with regard to the DISA STIGs for {RHEL} are provided for those organizations that integrate the DISA STIGs as a part of their overall security strategy. + +[NOTE] +==== +These recommendations do not guarantee security or compliance of your deployment of {PlatformNameShort}. +You must assess security from the unique requirements of your organization to address specific threats and risks and balance these against implementation factors. ==== include::aap-hardening/con-hardening-guide-audience.adoc[leveloffset=+1] include::aap-hardening/con-product-overview.adoc[leveloffset=+1] + +include::aap-hardening/con-deployment-methods.adoc[leveloffset=+2] include::aap-hardening/con-platform-components.adoc[leveloffset=+2] \ No newline at end of file diff --git a/downstream/assemblies/analytics/assembly-automation-savings-planner.adoc b/downstream/assemblies/analytics/assembly-automation-savings-planner.adoc index 2909a13153..62866570b8 100644 --- a/downstream/assemblies/analytics/assembly-automation-savings-planner.adoc +++ b/downstream/assemblies/analytics/assembly-automation-savings-planner.adoc @@ -6,7 +6,7 @@ ifdef::context[:parent-context: {context}] = About the {planner} -An automation savings plan gives you the ability to plan, track, and analyze the potential efficiency and cost savings of your automation initiatives. Use {InsightsName} to create an automation savings plan by defining a list of tasks needed to complete an automation job. You can then link your automation savings plans to an Ansible job template in order to accurately measure the time and cost savings upon completion of an automation job. +An automation savings plan gives you the ability to plan, track, and analyze the potential efficiency and cost savings of your automation initiatives. Use automation analytics to create an automation savings plan by defining a list of tasks needed to complete an automation job. You can then link your automation savings plans to an Ansible job template to accurately measure the time and cost savings upon completion of an automation job. To create an automation savings plan, you can utilize the {planner} to prioritize the various automation jobs throughout your organization and understand the potential time and cost savings for your automation initiatives.
diff --git a/downstream/assemblies/builder/assembly-open-source-license.adoc b/downstream/assemblies/builder/assembly-open-source-license.adoc new file mode 100644 index 0000000000..98bc726217 --- /dev/null +++ b/downstream/assemblies/builder/assembly-open-source-license.adoc @@ -0,0 +1,5 @@ +[id="assembly-open-source-license"] + += Open source license + +include::../aap-common/apache-2.0-license.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/assemblies/builder/assembly-publishing-exec-env.adoc b/downstream/assemblies/builder/assembly-publishing-exec-env.adoc index 4282f39acf..83a9b59812 100644 --- a/downstream/assemblies/builder/assembly-publishing-exec-env.adoc +++ b/downstream/assemblies/builder/assembly-publishing-exec-env.adoc @@ -9,7 +9,7 @@ include::builder/proc-customize-ee-image.adoc[leveloffset=+1] For more details on customizing {ExecEnvShort}s based on common scenarios, see the following topics in the _Ansible Builder Documentation_: -* link:https://ansible.readthedocs.io/projects/builder/en/latest/scenario_guides/scenario_copy/[Copying arbitratory files to an execution enviornment] +* link:https://ansible.readthedocs.io/projects/builder/en/latest/scenario_guides/scenario_copy/[Copying arbitrary files to an execution environment] * link:https://ansible.readthedocs.io/projects/builder/en/latest/scenario_guides/scenario_using_env/[Building execution environments with environment variables] * link:https://ansible.readthedocs.io/projects/builder/en/latest/scenario_guides/scenario_custom/[Building execution environments with environment variables and `ansible.cfg`] diff --git a/downstream/assemblies/builder/assembly-using-builder.adoc b/downstream/assemblies/builder/assembly-using-builder.adoc index 35d69d227a..9a713d8e84 100644 --- a/downstream/assemblies/builder/assembly-using-builder.adoc +++ b/downstream/assemblies/builder/assembly-using-builder.adoc @@ -6,6 +6,7 @@ include::builder/con-why-builder.adoc[leveloffset=+1] include::builder/proc-installing-builder.adoc[leveloffset=+1] +include::platform/con-building-an-execution-environment-in-a-disconnected-environment.adoc[leveloffset=+1] include::builder/con-building-definition-file.adoc[leveloffset=+1] include::builder/proc-executing-build.adoc[leveloffset=+1] diff --git a/downstream/assemblies/builder/platform b/downstream/assemblies/builder/platform new file mode 120000 index 0000000000..06203029c9 --- /dev/null +++ b/downstream/assemblies/builder/platform @@ -0,0 +1 @@ +../../modules/platform \ No newline at end of file diff --git a/downstream/assemblies/devtools/assembly-devtools-create-roles-collection.adoc b/downstream/assemblies/devtools/assembly-devtools-create-roles-collection.adoc new file mode 100644 index 0000000000..9df9a2e8cf --- /dev/null +++ b/downstream/assemblies/devtools/assembly-devtools-create-roles-collection.adoc @@ -0,0 +1,50 @@ +ifdef::context[:parent-context-of-devtools-create-roles-collection: {context}] + +:_mod-docs-content-type: ASSEMBLY + +ifndef::context[] +[id="devtools-create-roles-collection"] +endif::[] +ifdef::context[] +[id="devtools-create-roles-collection_{context}"] +endif::[] + += Creating a collection for distributing roles + +:context: devtools-create-roles-collection + +An Ansible role is a self-contained unit of Ansible automation content that groups related +tasks and associated variables, files, handlers, and other assets in a defined directory structure. + +You can run Ansible roles in one or more plays, and reuse them across playbooks.
+Invoking roles instead of tasks simplifies playbooks. +You can migrate existing standalone roles into collections, +and push them to private automation hub to share them with other users in your organization. +Distributing roles in this way is a typical way to use collections. + +With Ansible collections, you can store and distribute multiple roles in a single unit of reusable automation. +Inside a collection, you can share custom plug-ins across all roles in the collection instead of duplicating them in each role. + +You must move roles into collections if you want to use them in {PlatformNameShort}. + +You can add existing standalone roles to a collection, or add new roles to it. +Push the collection to source control and configure credentials for the repository in {PlatformNameShort}. + +include::devtools/con-devtools-plan-roles-collection.adoc[leveloffset=+1] +include::devtools/con-devtools-roles-collection-prerequisites.adoc[leveloffset=+1] + +include::devtools/proc-devtools-scaffold-roles-collection.adoc[leveloffset=+1] + +include::devtools/proc-devtools-migrate-existing-roles-collection.adoc[leveloffset=+1] +include::devtools/proc-devtools-create-new-role-in-collection.adoc[leveloffset=+1] +include::devtools/proc-devtools-docs-roles-collection.adoc[leveloffset=+1] + +// include::devtools/proc-devtools-run-roles-collection.adoc[leveloffset=+1] +// include::devtools/proc-devtools-molecule-test-roles-collection.adoc[leveloffset=+1] + +include::devtools/proc-devtools-publish-roles-collection-pah.adoc[leveloffset=+1] +include::devtools/proc-devtools-use-roles-collections-aap.adoc[leveloffset=+2] + +ifdef::parent-context-of-devtools-create-roles-collection[:context: {parent-context-of-devtools-create-roles-collection}] +ifndef::parent-context-of-devtools-create-roles-collection[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-devtools-develop-collections.adoc b/downstream/assemblies/devtools/assembly-devtools-develop-collections.adoc new file mode 100644 index 0000000000..4f9e0b1c29 --- /dev/null +++ b/downstream/assemblies/devtools/assembly-devtools-develop-collections.adoc @@ -0,0 +1,28 @@ +ifdef::context[:parent-context-of-devtools-develop-collections: {context}] + +:_mod-docs-content-type: ASSEMBLY + +ifndef::context[] +[id="devtools-develop-collections"] +endif::[] +ifdef::context[] +[id="devtools-develop-collections_{context}"] +endif::[] += Developing collections + +:context: devtools-develop-collections + +Collections are a distribution format for Ansible content that can include playbooks, roles, modules, and plugins. +Red Hat provides Ansible Content Collections on Ansible automation hub that contain both {CertifiedCon} and {Valid}. + +If you have installed private automation hub, you can create collections for your organization and push them +to {PrivateHubName} so that you can use them in job templates in {PlatformNameShort}. +You can use collections to package and distribute plug-ins. These plug-ins are written in Python. + +You can also create collections to package and distribute Ansible roles, which are expressed in YAML. +You can also include playbooks and custom plug-ins that are required for these roles in the collection. +Typically, collections of roles are distributed for use within your organization. 
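As a concrete companion to the collection-based distribution described above, this is a minimal `galaxy.yml` sketch for a collection of roles; the namespace, name, and author values are hypothetical:

[source,yaml]
----
# Illustrative galaxy.yml collection metadata; all values are hypothetical.
namespace: my_org
name: infra_roles
version: 1.0.0
readme: README.md
authors:
  - Automation team <automation@example.com>
description: Shared roles and custom plug-ins for the organization
----

In this layout, roles live in a `roles/` directory of the collection and shared custom plug-ins live in a `plugins/` directory, so each role can use them without duplication.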
+ +ifdef::parent-context-of-devtools-develop-collections[:context: {parent-context-of-devtools-develop-collections}] +ifndef::parent-context-of-devtools-develop-collections[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-devtools-install.adoc b/downstream/assemblies/devtools/assembly-devtools-install.adoc index 4f6fcd5e83..e512f756aa 100644 --- a/downstream/assemblies/devtools/assembly-devtools-install.adoc +++ b/downstream/assemblies/devtools/assembly-devtools-install.adoc @@ -1,13 +1,30 @@ -ifdef::context[:parent-context: {context}] +ifdef::context[:parent-context-of-assembly-devtools-install: {context}] [id="installing-devtools"] = Installing {ToolsName} :context: installing-devtools [role="_abstract"] +Red Hat provides two options for installing {ToolsName}. +// Both options require {VSCode} (Visual Studio Code) with the Ansible extension added. -include::devtools/proc-devtools-install.adoc[leveloffset=+1] +* Installation on a RHEL container running inside {VSCode}. +You can use this installation option on macOS, Windows, and Linux systems. +* Installation on your local RHEL system using an RPM (Red Hat Package Manager) package. -ifdef::parent-context[:context: {parent-context}] -ifndef::parent-context[:!context:] +include::devtools/con-devtools-requirements.adoc[leveloffset=+1] +include::devtools/proc-devtools-install-podman-desktop-wsl.adoc[leveloffset=+2] +include::devtools/proc-devtools-setup-registry-redhat-io.adoc[leveloffset=+2] +include::devtools/proc-devtools-install-vsc.adoc[leveloffset=+2] +include::devtools/proc-devtools-install-vscode-extension.adoc[leveloffset=+2] +include::devtools/proc-devtools-extension-settings.adoc[leveloffset=+2] +include::devtools/proc-devtools-extension-set-language.adoc[leveloffset=+2] +include::devtools/proc-devtools-ms-dev-containers-ext.adoc[leveloffset=+2] + +include::devtools/proc-devtools-install-container.adoc[leveloffset=+1] + +include::devtools/proc-devtools-install-rpm.adoc[leveloffset=+1] + +ifdef::parent-context-of-assembly-devtools-install[:context: {parent-context-of-assembly-devtools-install}] +ifndef::parent-context-of-assembly-devtools-install[:!context:] diff --git a/downstream/assemblies/devtools/assembly-devtools-intro.adoc b/downstream/assemblies/devtools/assembly-devtools-intro.adoc index 917ef299d5..fd9d3c7eac 100644 --- a/downstream/assemblies/devtools/assembly-devtools-intro.adoc +++ b/downstream/assemblies/devtools/assembly-devtools-intro.adoc @@ -14,6 +14,9 @@ you can use these tools from the {VSCode} user interface. Use {ToolsName} during local development of playbooks, local testing, and in a CI pipeline (linting and testing). +This document describes how to use {ToolsName} to create a playbook project that contains playbooks and roles that you can reuse within the project. +It also describes how to test the playbooks and deploy the project on your {PlatformNameShort} instance so that you can use the playbooks in automation jobs.
+ + include::devtools/ref-devtools-components.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] diff --git a/downstream/assemblies/devtools/assembly-devtools-setup.adoc b/downstream/assemblies/devtools/assembly-devtools-setup.adoc index b5d054f0a8..2f3c1e4da9 100644 --- a/downstream/assemblies/devtools/assembly-devtools-setup.adoc +++ b/downstream/assemblies/devtools/assembly-devtools-setup.adoc @@ -7,12 +7,8 @@ ifdef::context[:parent-context: {context}] :context: devtools-setup [role="_abstract"] -include::devtools/proc-installing-vscode.adoc[leveloffset=+1] -// include::devtools/proc-directory-setup.adoc[leveloffset=+1] include::devtools/proc-setup-vscode-workspace.adoc[leveloffset=+1] -include::devtools/proc-install-vscode-extension.adoc[leveloffset=+1] -include::devtools/proc-configure-extension-settings.adoc[leveloffset=+1] -include::devtools/proc-create-python-venv.adoc[leveloffset=+1] +// include::devtools/proc-create-python-venv.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] ifndef::parent-context[:!context:] diff --git a/downstream/assemblies/devtools/assembly-publishing-playbook-collection-aap.adoc b/downstream/assemblies/devtools/assembly-publishing-playbook-collection-aap.adoc new file mode 100644 index 0000000000..a3753afbdc --- /dev/null +++ b/downstream/assemblies/devtools/assembly-publishing-playbook-collection-aap.adoc @@ -0,0 +1,16 @@ +ifdef::context[:parent-context: {context}] +[id="publishing-playbook-collection-aap"] + += Publishing and running your playbooks in {PlatformNameShort} + +:context: publishing-playbook-collection-aap-intro + +[role="_abstract"] +The following procedures describe how to deploy your new playbooks in your instance of {PlatformNameShort} so that you can use them to run automation jobs. + +include::devtools/proc-devtools-save-scm.adoc[leveloffset=+1] +include::devtools/proc-devtools-create-aap-job.adoc[leveloffset=+1] + +ifdef::parent-context[:context: {parent-context}] +ifndef::parent-context[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-configure.adoc b/downstream/assemblies/devtools/assembly-rhdh-configure.adoc deleted file mode 100644 index 55205cbd5b..0000000000 --- a/downstream/assemblies/devtools/assembly-rhdh-configure.adoc +++ /dev/null @@ -1,15 +0,0 @@ -ifdef::context[:parent-context: {context}] -[id="rhdh-configure_{context}"] - -= Configuring {AAPRHDH} - -:context: rhdh-configure -[role="_abstract"] - -{AAPRHDH} Configuration - -//include::devtools/ref-devtools-components.adoc[leveloffset=+1] - -ifdef::parent-context[:context: {parent-context}] -ifndef::parent-context[:!context:] - diff --git a/downstream/assemblies/devtools/assembly-rhdh-example.adoc b/downstream/assemblies/devtools/assembly-rhdh-example.adoc new file mode 100644 index 0000000000..a4b006057b --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-example.adoc @@ -0,0 +1,22 @@ +ifdef::context[:parent-context: {context}] +[id="rhdh-example_{context}"] + += Example: Automate Red Hat Enterprise Linux firewall configuration + +:context: rhdh-example +[role="_abstract"] +This example demonstrates how the Ansible plug-ins can help Ansible users of all skill levels create quality Ansible content. + +As an infrastructure engineer new to Ansible, you have been tasked to create a playbook to configure a {RHEL} (RHEL) host firewall. + +The following procedures show you how to use the Ansible plug-ins and Dev Spaces to develop a playbook.
+ +include::devtools/proc-rhdh-firewall-example-learn.adoc[leveloffset=+1] +include::devtools/proc-rhdh-firewall-example-discover.adoc[leveloffset=+1] +include::devtools/proc-rhdh-firewall-example-create-playbook.adoc[leveloffset=+1] +include::devtools/proc-rhdh-firewall-example-new-playbook.adoc[leveloffset=+1] +include::devtools/proc-rhdh-firewall-example-edit.adoc[leveloffset=+1] + +ifdef::parent-context[:context: {parent-context}] +ifndef::parent-context[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-feedback.adoc b/downstream/assemblies/devtools/assembly-rhdh-feedback.adoc new file mode 100644 index 0000000000..abef66beae --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-feedback.adoc @@ -0,0 +1,25 @@ +ifdef::context[:parent-context: {context}] +[id="rhdh-feedback_{context}"] + += Providing feedback in the Ansible plug-ins + +:context: rhdh-feedback +[role="_abstract"] +The Ansible plug-ins provide a feedback form where you can suggest new features and content and share general feedback. + +. Click the Ansible `A` icon in the {RHDH} navigation panel. +. Click the *Feedback* icon to display the feedback form. ++ +image::rhdh-feedback-form.png[Ansible plug-in feedback form] +. Enter the feedback you want to provide. +. Select the *I understand that feedback is shared with Red Hat* checkbox. +. Click *Submit*. + +[NOTE] +==== +To ensure that Red Hat receives your feedback, exempt your {RHDH} URL from any browser ad blockers or privacy tools. +==== + +ifdef::parent-context[:context: {parent-context}] +ifndef::parent-context[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-install-ocp-helm.adoc b/downstream/assemblies/devtools/assembly-rhdh-install-ocp-helm.adoc new file mode 100644 index 0000000000..dd7710d889 --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-install-ocp-helm.adoc @@ -0,0 +1,42 @@ +ifdef::context[:parent-context-of-rhdh-install-ocp-helm: {context}] +[id="rhdh-install-ocp-helm_{context}"] + += Installing the Ansible plug-ins with a Helm chart on {OCPShort} + +:context: rhdh-install-ocp +[role="_abstract"] +The following procedures describe how to install Ansible plug-ins in {RHDH} instances on {OCP} using a Helm chart. + +The workflow is as follows: + +. Download the Ansible plug-ins files. +. Create a plug-in registry in your OpenShift cluster to host the Ansible plug-ins. +. Add the plug-ins to the Helm chart. +. Create a custom ConfigMap. +. Add your custom ConfigMap to your Helm chart. +. Edit your custom ConfigMap and Helm chart according to the required and optional configuration procedures. ++ +[NOTE] +==== +You can save changes to your Helm chart and ConfigMap after each update to your configuration. +You do not have to make all the changes to these files in a single session.
+==== + +include::devtools/con-rhdh-install-ocp-prereqs.adoc[leveloffset=+1] +include::devtools/con-rhdh-recommended-preconfig.adoc[leveloffset=+1] +include::devtools/proc-rhdh-download-plugins.adoc[leveloffset=+1] +include::devtools/proc-rhdh-create-plugin-registry.adoc[leveloffset=+1] + +// Required config +include::assembly-rhdh-ocp-required-installation.adoc[leveloffset=+1] +// +// Optional config +include::assembly-rhdh-ocp-configure-optional.adoc[leveloffset=+1] +// +// Full example configuration +include::assembly-rhdh-ocp-full-examples.adoc[leveloffset=+1] +// + +ifdef::parent-context-of-rhdh-install-ocp-helm[:context: {parent-context-of-rhdh-install-ocp-helm}] +ifndef::parent-context-of-rhdh-install-ocp-helm[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-install-ocp-operator.adoc b/downstream/assemblies/devtools/assembly-rhdh-install-ocp-operator.adoc new file mode 100644 index 0000000000..fd19fa8023 --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-install-ocp-operator.adoc @@ -0,0 +1,52 @@ +ifdef::context[:parent-context-of-rhdh-install-ocp-operator: {context}] +[id="rhdh-install-ocp-operator_{context}"] + += Installing the {AAPRHDHShort} with the Operator on {OCPShort} + +:context: rhdh-install-ocp-operator +[role="_abstract"] +The following procedures describe how to install {AAPRHDHShort} in {RHDH} instances on {OCP} using the Operator. + +// The workflow is as follows: +// +// . Download the Ansible plug-ins files. +// . Create a plug-in registry in your OpenShift cluster to host the Ansible plug-ins. +// . Create a local backup copy of the ConfigMap for the {RHDHShort} Operator. +// . Create a custom ConfigMap. +// . Add your custom ConfigMap to your Helm chart. +// . Edit your custom ConfigMap and Helm chart according to the required and optional configuration procedures. 
+ +include::devtools/con-rhdh-install-ocp-prereqs.adoc[leveloffset=+1] +include::devtools/con-rhdh-recommended-preconfig.adoc[leveloffset=+1] + +// Create custom ConfigMap +include::devtools/proc-rhdh-backup-operator-configmap.adoc[leveloffset=+1] +include::devtools/proc-rhdh-create-custom-configmap-operator-install.adoc[leveloffset=+1] +// Add sidecar container to custom ConfigMap +include::devtools/proc-rhdh-operator-add-custom-configmap-cr.adoc[leveloffset=+1] + +// Plug-in registry +include::devtools/proc-rhdh-download-plugins.adoc[leveloffset=+1] +include::devtools/proc-rhdh-create-plugin-registry.adoc[leveloffset=+1] + +// Install the dynamic plug-ins +include::devtools/proc-rhdh-install-dynamic-plugins-operator.adoc[leveloffset=+1] +// +// Add dynamic plug-ins to rhaap-dynamic-plugins-config +// Replace the following to reuse Helm config: +// include::devtools/proc-rhdh-operator-install-add-plugins-app-config.adoc[leveloffset=+1] + +include::devtools/proc-rhdh-add-custom-configmap.adoc[leveloffset=+1] +include::devtools/proc-rhdh-configure-devtools-server.adoc[leveloffset=+1] +include::devtools/proc-rhdh-configure-aap-details.adoc[leveloffset=+1] +include::devtools/proc-rhdh-add-plugin-software-templates.adoc[leveloffset=+1] +include::devtools/proc-rhdh-configure-rbac.adoc[leveloffset=+1] +include::assembly-rhdh-ocp-configure-optional.adoc[leveloffset=+1] + +// Full example configuration +include::devtools/ref-rhdh-full-aap-configmap-example.adoc[leveloffset=+1] +// + +ifdef::parent-context-of-rhdh-install-ocp-operator[:context: {parent-context-of-rhdh-install-ocp-operator}] +ifndef::parent-context-of-rhdh-install-ocp-operator[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-install.adoc b/downstream/assemblies/devtools/assembly-rhdh-install.adoc deleted file mode 100644 index c4d32d9280..0000000000 --- a/downstream/assemblies/devtools/assembly-rhdh-install.adoc +++ /dev/null @@ -1,16 +0,0 @@ -ifdef::context[:parent-context: {context}] -[id="rhdh-install_{context}"] - -= Installing {AAPRHDH} - -:context: rhdh-install -[role="_abstract"] - -{AAPRHDH} (`ansible-dev-tools`) is a suite of tools provided with {PlatformNameShort} to help automation creators to -create, test, and deploy playbook projects, execution environments, and collections. 
- -//include::devtools/ref-devtools-components.adoc[leveloffset=+1] - -ifdef::parent-context[:context: {parent-context}] -ifndef::parent-context[:!context:] - diff --git a/downstream/assemblies/devtools/assembly-rhdh-intro.adoc b/downstream/assemblies/devtools/assembly-rhdh-intro.adoc index 59ac7f4829..8e297ab8cc 100644 --- a/downstream/assemblies/devtools/assembly-rhdh-intro.adoc +++ b/downstream/assemblies/devtools/assembly-rhdh-intro.adoc @@ -6,9 +6,10 @@ ifdef::context[:parent-context: {context}] :context: rhdh-intro [role="_abstract"] -{AAPRHDH} introduction placeholder - -//include::devtools/ref-devtools-components.adoc[leveloffset=+1] +include::devtools/ref-rhdh-about-rhdh.adoc[leveloffset=+1] +include::devtools/ref-rhdh-about-plugins.adoc[leveloffset=+1] +include::devtools/ref-rhdh-architecture.adoc[leveloffset=+1] +// include::devtools/ref-devtools-components.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] ifndef::parent-context[:!context:] diff --git a/downstream/assemblies/devtools/assembly-rhdh-ocp-configure-optional.adoc b/downstream/assemblies/devtools/assembly-rhdh-ocp-configure-optional.adoc new file mode 100644 index 0000000000..3800180d56 --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-ocp-configure-optional.adoc @@ -0,0 +1,18 @@ +ifdef::context[:parent-context-of-rhdh-ocp-configure-optional: {context}] +[id="rhdh-ocp-configure-optional_{context}"] + += Optional configuration for Ansible plug-ins + +:context: rhdh-ocp-configure-optional_{parent-context-of-rhdh-ocp-configure-optional} +[role="_abstract"] + +include::devtools/proc-rhdh-enable-rhdh-authentication.adoc[leveloffset=+1] +include::devtools/proc-rhdh-configure-optional-integrations.adoc[leveloffset=+1] +include::devtools/proc-rhdh-configure-devspaces.adoc[leveloffset=+2] +include::devtools/proc-rhdh-configure-pah-url.adoc[leveloffset=+2] + +ifdef::parent-context-of-rhdh-ocp-configure-optional[:context: {parent-context-of-rhdh-ocp-configure-optional}] +ifndef::parent-context-of-rhdh-ocp-configure-optional[:!context:] + +ifdef::context[:parent-context-of-assembly: {context}] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-ocp-full-examples.adoc b/downstream/assemblies/devtools/assembly-rhdh-ocp-full-examples.adoc new file mode 100644 index 0000000000..2c4565e97d --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-ocp-full-examples.adoc @@ -0,0 +1,15 @@ +ifdef::context[:parent-context-of-rhdh-ocp-full-examples: {context}] +[id="rhdh-ocp-full-examples_{context}"] + += Full examples + +:context: rhdh-ocp-full-examples +[role="_abstract"] + + +include::devtools/ref-rhdh-full-aap-configmap-example.adoc[leveloffset=+1] +include::devtools/ref-rhdh-full-helm-chart-ansible-plugins.adoc[leveloffset=+1] + +ifdef::parent-context-of-rhdh-ocp-full-examples[:context: {parent-context-of-rhdh-ocp-full-examples}] +ifndef::parent-context-of-rhdh-ocp-full-examples[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-ocp-required-installation.adoc b/downstream/assemblies/devtools/assembly-rhdh-ocp-required-installation.adoc new file mode 100644 index 0000000000..d9f6558b7e --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-ocp-required-installation.adoc @@ -0,0 +1,21 @@ +ifdef::context[:parent-context-of-rhdh-ocp-required-installation: {context}] +[id="rhdh-ocp-required-installation_{context}"] + += Required configuration + +:context: rhdh-ocp-required-installation +[role="_abstract"] + 
+include::devtools/proc-rhdh-add-plugin-config.adoc[leveloffset=+1] +include::devtools/proc-rhdh-devtools-sidecar.adoc[leveloffset=+1] +include::devtools/proc-rhdh-add-pull-secret-helm.adoc[leveloffset=+2] +include::devtools/proc-rhdh-add-devtools-container.adoc[leveloffset=+2] +include::devtools/proc-rhdh-add-custom-configmap.adoc[leveloffset=+1] +include::devtools/proc-rhdh-configure-devtools-server.adoc[leveloffset=+1] +include::devtools/proc-rhdh-configure-aap-details.adoc[leveloffset=+1] +include::devtools/proc-rhdh-add-plugin-software-templates.adoc[leveloffset=+1] +include::devtools/proc-rhdh-configure-rbac.adoc[leveloffset=+1] + +ifdef::parent-context-of-rhdh-ocp-required-installation[:context: {parent-context-of-rhdh-ocp-required-installation}] +ifndef::parent-context-of-rhdh-ocp-required-installation[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-subscription-warnings.adoc b/downstream/assemblies/devtools/assembly-rhdh-subscription-warnings.adoc new file mode 100644 index 0000000000..e765ee9c9c --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-subscription-warnings.adoc @@ -0,0 +1,25 @@ +ifdef::context[:parent-context-of-rhdh-subscription-warnings: {context}] +[id="rhdh-subscription-warnings_{context}"] + += Ansible plug-ins subscription warning messages + +:context: rhdh-subscription-warnings + +[role="_abstract"] +The Ansible plug-ins display a subscription warning banner in the user interface in the following scenarios: + +* xref:rhdh-warning-unable-connect-aap_rhdh-subscription-warnings[Unable to connect to Ansible Automation Platform] +* xref:rhdh-warning-unable-authenticate-aap_rhdh-subscription-warnings[Unable to authenticate to Ansible Automation Platform] +* xref:rhdh-warning-invalid-aap-config_rhdh-subscription-warnings[Invalid Ansible Automation Platform configuration] +* xref:rhdh-warning-aap-ooc_rhdh-subscription-warnings[Ansible Automation Platform subscription is out of compliance] +* xref:rhdh-warning-invalid-aap-subscription_rhdh-subscription-warnings[Invalid Ansible Automation Platform subscription] + +include::devtools/proc-rhdh-warning-unable-connect-aap.adoc[leveloffset=+1] +include::devtools/proc-rhdh-warning-unable-authenticate-aap.adoc[leveloffset=+1] +include::devtools/proc-rhdh-warning-invalid-aap-config.adoc[leveloffset=+1] +include::devtools/proc-rhdh-warning-aap-ooc.adoc[leveloffset=+1] +include::devtools/proc-rhdh-warning-invalid-aap-subscription.adoc[leveloffset=+1] + +ifdef::parent-context-of-rhdh-subscription-warnings[:context: {parent-context-of-rhdh-subscription-warnings}] +ifndef::parent-context-of-rhdh-subscription-warnings[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-telemetry-capturing.adoc b/downstream/assemblies/devtools/assembly-rhdh-telemetry-capturing.adoc new file mode 100644 index 0000000000..b294799d1d --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-telemetry-capturing.adoc @@ -0,0 +1,24 @@ +ifdef::context[:parent-context-of-rhdh-telemetry-capturing: {context}] +[id="rhdh-configure-telemetry_{context}"] + += {RHDH} data telemetry capturing + +{RHDH} (RHDH) sends telemetry data to Red Hat using the `backstage-plugin-analytics-provider-segment` plug-in, which is enabled by default. +This includes telemetry data from the Ansible plug-ins. + +Red Hat collects and analyzes the following data to improve your experience with {RHDH}: + +* Events of page visits and clicks on links or buttons. 
+* System-related information, for example, locale, timezone, user agent, including browser and OS details. +* Page-related information, for example, title, category, extension name, URL, path, referrer, and search parameters. +* Anonymized IP addresses, recorded as 0.0.0.0. +* Anonymized username hashes, which are unique identifiers used solely to count the number of unique users of the RHDH application. +* Feedback and sentiment provided in the Ansible plug-ins feedback form. + +With {RHDH}, you can disable or customize the telemetry data collection feature. +For more information, refer to the +link:{BaseURL}/red_hat_developer_hub/{RHDHVers}/html-single/administration_guide_for_red_hat_developer_hub/index#assembly-rhdh-telemetry[Telemetry data collection] +section of the _Administration guide for Red Hat Developer Hub_. + +ifdef::parent-context-of-rhdh-telemetry-capturing[:context: {parent-context-of-rhdh-telemetry-capturing}] +ifndef::parent-context-of-rhdh-telemetry-capturing[:!context:] diff --git a/downstream/assemblies/devtools/assembly-rhdh-uninstall-ocp-helm.adoc b/downstream/assemblies/devtools/assembly-rhdh-uninstall-ocp-helm.adoc new file mode 100644 index 0000000000..919c675d31 --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-uninstall-ocp-helm.adoc @@ -0,0 +1,15 @@ +ifdef::context[:parent-context-of-assembly-rhdh-uninstall-ocp-helm: {context}] +[id="rhdh-uninstall-ocp-helm_{context}"] + += Uninstalling the Ansible plug-ins from a Helm installation on {OCPShort} + +:context: rhdh-uninstall-ocp-helm + +[role="_abstract"] +To uninstall the Ansible plug-ins, you must remove any software templates that use the `ansible:content:create` action from {RHDH}, and remove the plug-ins configuration from the Helm chart in OpenShift. + +include::devtools/proc-rhdh-uninstall-ocp-helm.adoc[leveloffset=+1] + +ifdef::parent-context-of-assembly-rhdh-uninstall-ocp-helm[:context: {parent-context-of-assembly-rhdh-uninstall-ocp-helm}] +ifndef::parent-context-of-assembly-rhdh-uninstall-ocp-helm[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-uninstall-ocp-operator.adoc b/downstream/assemblies/devtools/assembly-rhdh-uninstall-ocp-operator.adoc new file mode 100644 index 0000000000..e693c965ed --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-uninstall-ocp-operator.adoc @@ -0,0 +1,36 @@ +ifdef::context[:parent-context-of-assembly-rhdh-uninstall-ocp-operator: {context}] +[id="rhdh-uninstall-ocp-operator_{context}"] + += Uninstalling an Operator installation on {OCPShort} + +:context: rhdh-uninstall-ocp-operator + +To delete the dynamic plug-ins from your installation, you must edit the ConfigMaps +that reference Ansible. + +The deployment reloads automatically when the ConfigMaps are updated. +You do not need to reload the deployment manually. + +// rhaap-dynamic-plugins-config configMap +include::devtools/proc-rhdh-uninstall-ocp-operator-plugins-cm.adoc[leveloffset=+1] + +// app-config-rhdh ConfigMap +include::devtools/proc-rhdh-uninstall-ocp-operator-rhdh-cm.adoc[leveloffset=+1] + +== Uninstalling the sidecar container + +// Remove sidecar from rhdh-custom-config + +// Do this if you need config apart from the Sidecar container to your `rhdh-custom-config` Custom Resource ConfigMap. +// `rhdh-custom-config` created in Section 3.4.
+ +// Remove Custom resource ConfigMap from the {RHDHShort} Operator Custom Resource +// Do this if your `rhdh-custom-config` contains only sidecar config +// `rhdh-custom-config` added to Operator CR in section 3.5 +// +include::devtools/proc-rhdh-uninstall-ocp-operator-sidecar.adoc[leveloffset=+2] +include::devtools/proc-rhdh-uninstall-ocp-remove-sidecar-cr.adoc[leveloffset=+2] + +ifdef::parent-context-of-assembly-rhdh-uninstall-ocp-operator[:context: {parent-context-of-assembly-rhdh-uninstall-ocp-operator}] +ifndef::parent-context-of-assembly-rhdh-uninstall-ocp-operator[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-uninstall.adoc b/downstream/assemblies/devtools/assembly-rhdh-uninstall.adoc new file mode 100644 index 0000000000..99262a4e00 --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-uninstall.adoc @@ -0,0 +1,14 @@ +ifdef::context[:parent-context-of-assembly-rhdh-uninstall: {context}] +[id="rhdh-uninstall_{context}"] + += Uninstalling the Ansible plug-ins + +:context: rhdh-uninstall + +include::devtools/proc-rhdh-uninstall-ocp-helm.adoc[leveloffset=+1] + +include::devtools/proc-rhdh-uninstall-ocp-operator.adoc[leveloffset=+1] + +ifdef::parent-context-of-assembly-rhdh-uninstall[:context: {parent-context-of-assembly-rhdh-uninstall}] +ifndef::parent-context-of-assembly-rhdh-uninstall[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-upgrade-ocp-helm.adoc b/downstream/assemblies/devtools/assembly-rhdh-upgrade-ocp-helm.adoc new file mode 100644 index 0000000000..6222644d2c --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-upgrade-ocp-helm.adoc @@ -0,0 +1,17 @@ +ifdef::context[:parent-context-of-assembly-rhdh-upgrade-ocp-helm: {context}] +[id="rhdh-upgrade-ocp-helm_{context}"] + += Upgrading the Ansible plug-ins on a Helm installation on {OCPShort} + +:context: rhdh-upgrade-ocp-helm + +[role="_abstract"] +To upgrade the Ansible plug-ins, you must update the `plugin-registry` application with the latest Ansible plug-ins files. + +include::devtools/proc-rhdh-download-plugins.adoc[leveloffset=+1] +include::devtools/proc-rhdh-update-plugin-registry.adoc[leveloffset=+1] +include::devtools/proc-rhdh-update-plugins-helm-version-numbers.adoc[leveloffset=+1] + +ifdef::parent-context-of-assembly-rhdh-upgrade-ocp-helm[:context: {parent-context-of-assembly-rhdh-upgrade-ocp-helm}] +ifndef::parent-context-of-assembly-rhdh-upgrade-ocp-helm[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-upgrade-ocp-operator.adoc b/downstream/assemblies/devtools/assembly-rhdh-upgrade-ocp-operator.adoc new file mode 100644 index 0000000000..cb5e2847ad --- /dev/null +++ b/downstream/assemblies/devtools/assembly-rhdh-upgrade-ocp-operator.adoc @@ -0,0 +1,17 @@ +ifdef::context[:parent-context-of-assembly-rhdh-upgrade-ocp-operator: {context}] +[id="rhdh-upgrade-ocp-operator_{context}"] + += Upgrading the Ansible plug-ins on an Operator installation on {OCPShort} + +:context: rhdh-upgrade-ocp-operator + +[role="_abstract"] +To upgrade the Ansible plug-ins, you must update the `plugin-registry` application with the latest Ansible plug-ins files. 
+ +include::devtools/proc-rhdh-download-plugins.adoc[leveloffset=+1] +include::devtools/proc-rhdh-update-plugin-registry.adoc[leveloffset=+1] +include::devtools/proc-rhdh-update-plugins-operator-version-numbers.adoc[leveloffset=+1] + +ifdef::parent-context-of-assembly-rhdh-upgrade-ocp-operator[:context: {parent-context-of-assembly-rhdh-upgrade-ocp-operator}] +ifndef::parent-context-of-assembly-rhdh-upgrade-ocp-operator[:!context:] + diff --git a/downstream/assemblies/devtools/assembly-rhdh-upgrading-uninstalling.adoc b/downstream/assemblies/devtools/assembly-rhdh-upgrading-uninstalling.adoc deleted file mode 100644 index 8161fab3e2..0000000000 --- a/downstream/assemblies/devtools/assembly-rhdh-upgrading-uninstalling.adoc +++ /dev/null @@ -1,15 +0,0 @@ -ifdef::context[:parent-context: {context}] -[id="rhdh-upgrade_{context}"] - -= Upgrading and uninstalling {AAPRHDH} - -:context: rhdh-upgrade -[role="_abstract"] - -{AAPRHDH} Upgrading and uninstalling placeholder - -//include::devtools/ref-devtools-components.adoc[leveloffset=+1] - -ifdef::parent-context[:context: {parent-context}] -ifndef::parent-context[:!context:] - diff --git a/downstream/assemblies/devtools/assembly-rhdh-using.adoc b/downstream/assemblies/devtools/assembly-rhdh-using.adoc index 97f4039a2e..d44ed51792 100644 --- a/downstream/assemblies/devtools/assembly-rhdh-using.adoc +++ b/downstream/assemblies/devtools/assembly-rhdh-using.adoc @@ -1,14 +1,32 @@ ifdef::context[:parent-context: {context}] [id="rhdh-using_{context}"] -= Using {AAPRHDH} += Using the Ansible plug-ins :context: rhdh-using [role="_abstract"] +You can use {AAPRHDH} (RHDH) to learn about Ansible, create automation projects, and access opinionated workflows and tools to develop and test your automation code. +From the {RHDH} UI, you can navigate to your {PlatformNameShort} instance, where you can configure and run automation jobs. -Using the plug-in - placeholder +This document describes how to use the {AAPRHDH}. +It presents a worked example of developing a playbook project for automating updates to your firewall configuration on RHEL systems. -//include::devtools/ref-devtools-components.adoc[leveloffset=+1] +== Optional requirement + +The {AAPRHDH} link to Learning Paths on the Red{nbsp}Hat developer portal, +link:https://developers.redhat.com/learn[developers.redhat.com/learn]. + +To access the Learning Paths, you must have a Red{nbsp}Hat account and you must be able to log in to link:https://developers.redhat.com[developers.redhat.com]. 
+
+include::devtools/ref-rhdh-dashboard.adoc[leveloffset=+1]
+include::devtools/ref-rhdh-learning.adoc[leveloffset=+1]
+include::devtools/ref-rhdh-discover-collections.adoc[leveloffset=+1]
+include::devtools/proc-rhdh-create.adoc[leveloffset=+1]
+include::devtools/proc-rhdh-view.adoc[leveloffset=+1]
+include::devtools/proc-rhdh-develop-projects.adoc[leveloffset=+1]
+include::devtools/proc-rhdh-develop-projects-devspaces.adoc[leveloffset=+2]
+include::devtools/proc-rhdh-execute-automation-devspaces.adoc[leveloffset=+2]
+include::devtools/proc-rhdh-set-up-controller-project.adoc[leveloffset=+1]
 
 ifdef::parent-context[:context: {parent-context}]
 ifndef::parent-context[:!context:]
diff --git a/downstream/assemblies/devtools/assembly-writing-running-playbook.adoc b/downstream/assemblies/devtools/assembly-writing-running-playbook.adoc
index 6a4faa71aa..3f7b32f4a5 100644
--- a/downstream/assemblies/devtools/assembly-writing-running-playbook.adoc
+++ b/downstream/assemblies/devtools/assembly-writing-running-playbook.adoc
@@ -1,15 +1,21 @@
-ifdef::context[:parent-context: {context}]
-[id="writing-running-playbook"]
+ifdef::context[:parent-context_of_writing-running-playbook: {context}]
+[id="writing-running-playbook_{context}"]
 
 = Writing and running a playbook with {ToolsName}
 
 :context: writing-running-playbook
 
 [role="_abstract"]
-include::devtools/proc-writing-playbook.adoc[leveloffset=+1]
+include::devtools/proc-devtools-set-up-ansible-config.adoc[leveloffset=+1]
+include::devtools/proc-devtools-writing-first-playbook.adoc[leveloffset=+1]
+include::devtools/proc-devtools-inspect-playbook.adoc[leveloffset=+1]
+include::devtools/proc-devtools-run-playbook-extension.adoc[leveloffset=+1]
+include::devtools/proc-devtools-extension-run-ansible-playbook.adoc[leveloffset=+2]
+include::devtools/proc-devtools-extension-run-ansible-navigator.adoc[leveloffset=+2]
+include::devtools/proc-devtools-working-with-ee.adoc[leveloffset=+2]
 include::devtools/proc-debugging-playbook.adoc[leveloffset=+1]
-include::devtools/proc-running-playbook.adoc[leveloffset=+1]
+include::devtools/proc-devtools-testing-playbook.adoc[leveloffset=+1]
 
-ifdef::parent-context[:context: {parent-context}]
-ifndef::parent-context[:!context:]
+ifdef::parent-context_of_writing-running-playbook[:context: {parent-context_of_writing-running-playbook}]
+ifndef::parent-context_of_writing-running-playbook[:!context:]
diff --git a/downstream/assemblies/eda/assembly-eda-credential-types.adoc b/downstream/assemblies/eda/assembly-eda-credential-types.adoc
new file mode 100644
index 0000000000..2b7900e934
--- /dev/null
+++ b/downstream/assemblies/eda/assembly-eda-credential-types.adoc
@@ -0,0 +1,15 @@
+[id="eda-credential-types"]
+
+= Credential types
+
+{EDAcontroller} comes with several built-in credential types that you can use for syncing projects, running rulebook activations, executing job templates through {MenuTopAE} ({ControllerName}), fetching images from container registries, and processing data through event streams.
+
+These built-in credential types are not editable, so if you want credential types that support authentication with other systems, you can create your own and use them in your source plugins. Each credential type contains an input configuration and an injector configuration that can be passed to an Ansible rulebook to configure your sources, as sketched below.
+
+For more information, see xref:eda-custom-credential-types[Custom credential types].
+//[J. Self] Will add the cross-reference/link later.
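+
+For illustration, the following is a minimal sketch of the two configurations for a hypothetical token-based service. The `host` and `token` field names and the injected variable names are illustrative assumptions, not a built-in credential type:
+
+----
+# Input configuration: the fields that the credential form collects.
+fields:
+  - id: host
+    type: string
+    label: Service host
+  - id: token
+    type: string
+    label: API token
+    secret: true
+required:
+  - host
+  - token
+----
+
+----
+# Injector configuration: exposes the saved values to the rulebook
+# source as extra variables.
+extra_vars:
+  service_host: "{{ host }}"
+  service_token: "{{ token }}"
+----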
+ + +include::eda/con-custom-credential-types.adoc[leveloffset=+1] +include::eda/proc-eda-set-up-credential-types.adoc[leveloffset=+1] + diff --git a/downstream/assemblies/eda/assembly-eda-credentials.adoc b/downstream/assemblies/eda/assembly-eda-credentials.adoc index 244c5f8baa..f887e6dc44 100644 --- a/downstream/assemblies/eda/assembly-eda-credentials.adoc +++ b/downstream/assemblies/eda/assembly-eda-credentials.adoc @@ -1,10 +1,15 @@ [id="eda-credentials"] -= Setting up credentials for {EDAcontroller} += Credentials + +You can use credentials to store secrets that can be used for authentication purposes with resources, such as decision environments, rulebook activations and projects for {EDAcontroller}, and projects for {ControllerName}. + +Credentials authenticate users when launching jobs against machines and importing project content from a version control system. + +You can grant users and teams the ability to use these credentials without exposing the credential to the user. If a user moves to a different team or leaves the organization, you do not have to rekey all of your systems just because that credential was previously available. -Credentials are used by {EDAName} for authentication when launching rulebooks. -include::eda/proc-eda-set-up-credential.adoc[leveloffset=+1] include::eda/con-credentials-list-view.adoc[leveloffset=+1] +include::eda/proc-eda-set-up-credential.adoc[leveloffset=+1] include::eda/proc-eda-edit-credential.adoc[leveloffset=+1] include::eda/proc-eda-delete-credential.adoc[leveloffset=+1] diff --git a/downstream/assemblies/eda/assembly-eda-decision-environments.adoc b/downstream/assemblies/eda/assembly-eda-decision-environments.adoc index ad3652facd..f7677b4c24 100644 --- a/downstream/assemblies/eda/assembly-eda-decision-environments.adoc +++ b/downstream/assemblies/eda/assembly-eda-decision-environments.adoc @@ -2,11 +2,12 @@ = Decision environments -Decision environments are a container image to run Ansible rulebooks. -They create a common language for communicating automation dependencies, and provide a standard way to build and distribute the automation environment. -The default decision environment is found in the link:https://quay.io/repository/ansible/ansible-rulebook[Ansible-Rulebook]. +Decision environments are container images that run Ansible rulebooks. +They create a common language for communicating automation dependencies, and give a standard way to build and distribute the automation environment. +You can find the default decision environment in the link:https://quay.io/repository/ansible/ansible-rulebook[Ansible-Rulebook]. -To create your own decision environment refer to xref:eda-build-a-custom-decision-environment[Building a custom decision environment for Event-Driven Ansible within Ansible Automation Platform]. +To create your own decision environment, see xref:eda-controller-install-builder[Installing ansible-builder] and xref:eda-build-a-custom-decision-environment[Building a custom decision environment for Event-Driven Ansible within Ansible Automation Platform]. 
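+
+For reference, a decision environment definition consumed by `ansible-builder` might look like the following minimal sketch. The base image name and the collection list are illustrative assumptions, not required values:
+
+----
+# decision-environment.yml: a build definition for ansible-builder
+version: 3
+images:
+  base_image:
+    name: quay.io/ansible/ansible-rulebook:latest  # assumed base image
+dependencies:
+  galaxy:
+    collections:
+      - ansible.eda  # provides the default event source plugins and filters
+----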
+include::eda/ref-eda-controller-install-builder.adoc[leveloffset=+1]
+include::eda/proc-eda-build-a-custom-decision-environment.adoc[leveloffset=+1]
 include::eda/proc-eda-set-up-new-decision-environment.adoc[leveloffset=+1]
-include::eda/proc-eda-build-a-custom-decision-environment.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/downstream/assemblies/eda/assembly-eda-event-filter-plugins.adoc b/downstream/assemblies/eda/assembly-eda-event-filter-plugins.adoc
new file mode 100644
index 0000000000..9d5852e57c
--- /dev/null
+++ b/downstream/assemblies/eda/assembly-eda-event-filter-plugins.adoc
@@ -0,0 +1,49 @@
+[id="eda-event-filter-plugins"]
+
+= Event filter plugins
+
+Events sometimes have extra data that is unnecessary and might overwhelm the rule engine.
+Use event filters to remove that extra data so you can focus on what matters to your rules.
+Event filters might also change the format of the data so that the rule conditions can better match the data.
+
+Events are defined as Python code and distributed as collections.
+The default link:https://github.com/ansible/event-driven-ansible/tree/main/extensions/eda/plugins/event_filter[eda collection] has the following filters:
+
+[cols="30%,30%",options="header"]
+|====
+| Name | Description
+| json_filter | This filter includes and excludes keys from the event object
+| dashes_to_underscores | This filter changes the dashes in all keys in the payload to underscores
+| ansible.eda.insert_hosts_to_meta | This filter adds host information to the event so that ansible-rulebook can locate and use it
+| ansible.eda.normalize_keys | This filter changes non-alphanumeric characters in keys to underscores
+|====
+
+You can chain event filters one after the other, and the updated data is sent from one filter to the next.
+Event filters are defined in the rulebook after a source is defined.
+When the rulebook starts the source plugin, it associates the correct filters and transforms the data before putting it into the queue.
+
+.Example
+
+----
+sources:
+  - name: azure_service_bus
+    ansible.eda.azure_service_bus:
+      conn_str: "{{connection_str}}"
+      queue_name: "{{queue_name}}"
+    filters:
+      - json_filter:
+          include_keys: ['clone_url']
+          exclude_keys: ['*_url', '_links', 'base', 'sender', 'owner', 'user']
+      - dashes_to_underscores:
+----
+
+In this example, the data is first passed through the `json_filter` and then through the `dashes_to_underscores` filter.
+In the event payload, keys can only contain letters, numbers, and underscores.
+The period (.) is used to access nested keys.
+
+Because every event should record its origin, the `eda.builtin.insert_meta_info` filter is added automatically by ansible-rulebook to record the `source name`, `type`, and `received_at` values.
+The `received_at` value stores a date time in UTC ISO8601 format and includes the microseconds.
+The `uuid` value stores the unique ID for the event.
+The `meta` key is used to store metadata about the event, and it is needed to correctly report on the events in the {PlatformNameShort} server.
+
+include::eda/con-eda-author-event-filters.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/eda/assembly-eda-logging-strategy.adoc b/downstream/assemblies/eda/assembly-eda-logging-strategy.adoc
new file mode 100644
index 0000000000..26e7bffc77
--- /dev/null
+++ b/downstream/assemblies/eda/assembly-eda-logging-strategy.adoc
@@ -0,0 +1,11 @@
+[id="eda-logging-strategy"]
+
+= {EDAName} logging strategy
+
+{EDAName} offers an audit logging solution for its resources.
+Each supported create, read, update, and delete (CRUD) operation is logged against rulebook activations, event streams, decision environments, and projects.
+Some of these resources support further operations, such as sync, enable, disable, restart, start, and stop; for these operations, logging is supported as well.
+These logs are only retained for the lifecycle of their associated container.
+See the following sample logs for each supported logging operation.
+
+include::eda/ref-eda-logging-samples.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/eda/assembly-eda-performance-tuning.adoc b/downstream/assemblies/eda/assembly-eda-performance-tuning.adoc
new file mode 100644
index 0000000000..25d843e7d7
--- /dev/null
+++ b/downstream/assemblies/eda/assembly-eda-performance-tuning.adoc
@@ -0,0 +1,15 @@
+[id="eda-performance-tuning"]
+
+= Performance tuning for {EDAcontroller}
+
+{EDAName} is a highly scalable, flexible automation capability.
+{EDAcontroller} provides the interface through which {EDAName} automation runs.
+Tune your {EDAcontroller} to optimize performance and scalability through:
+
+* Characterizing your workload
+* System level monitoring
+* Performance troubleshooting
+
+include::eda/con-characterizing-your-workload.adoc[leveloffset=+1]
+include::eda/con-system-level-monitoring.adoc[leveloffset=+1]
+include::eda/ref-performance-troubleshooting.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/eda/assembly-eda-projects.adoc b/downstream/assemblies/eda/assembly-eda-projects.adoc
index 2bdd34c4e4..a258124017 100644
--- a/downstream/assemblies/eda/assembly-eda-projects.adoc
+++ b/downstream/assemblies/eda/assembly-eda-projects.adoc
@@ -6,6 +6,11 @@ Projects are a logical collection of rulebooks.
 They must be a git repository and only http protocol is supported.
 The rulebooks of a project must be located in the path defined for {EDAName} content in Ansible collections: `/extensions/eda/rulebooks` at the root of the project.
 
+[IMPORTANT]
+====
+To meet high availability demands, {EDAcontroller} shares centralized link:https://redis.io/[Redis (REmote DIctionary Server)] with the {PlatformNameShort} UI. When Redis is unavailable, you will not be able to create or sync projects.
+====
+
 include::eda/proc-eda-set-up-new-project.adoc[leveloffset=+1]
 include::eda/con-eda-projects-list-view.adoc[leveloffset=+1]
 include::eda/proc-eda-editing-a-project.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/eda/assembly-eda-rulebook-activations.adoc b/downstream/assemblies/eda/assembly-eda-rulebook-activations.adoc
index 9c4e992169..2af0325df6 100644
--- a/downstream/assemblies/eda/assembly-eda-rulebook-activations.adoc
+++ b/downstream/assemblies/eda/assembly-eda-rulebook-activations.adoc
@@ -4,7 +4,45 @@
 
 [role="_abstract"]
 
-A rulebook activation is a process running in the background defined by a decision environment executing a specific rulebook.
+A rulebook is a set of conditional rules that {EDAName} uses to perform IT actions in an event-driven automation model.
+Rulebooks are the means by which users tell {EDAName} which sources to check for events and what to do when certain conditions are met.
+
+A rulebook specifies actions to be performed when a rule is triggered.
+A rule is triggered when the events match the conditions for the rule.
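+
+For example, a minimal rulebook pairs a single source with a single rule. The following is a sketch only; it assumes the `ansible.eda.webhook` source from the default collection and a job template named `Remediate outage`:
+
+----
+- name: Respond to webhook events
+  hosts: all
+  sources:
+    - ansible.eda.webhook:
+        host: 0.0.0.0
+        port: 5000
+  rules:
+    - name: Run remediation when a service reports down
+      condition: event.payload.status == "down"
+      action:
+        run_job_template:
+          name: Remediate outage       # assumed job template name
+          organization: Default
+----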
+The following actions are currently supported:
+
+* `run_playbook` (only supported with ansible-rulebook CLI)
+* `run_module`
+* `run_job_template`
+* `run_workflow_template`
+* `set_fact`
+* `post_event`
+* `retract_fact`
+* `print_event`
+* `shutdown`
+* `debug`
+* `none`
+
+For further details, see link:https://ansible.readthedocs.io/projects/rulebook/en/stable/actions.html[Actions].
+
+A rulebook activation is a process running in the background defined by a decision environment executing a specific rulebook. You can set up your rulebook activation by following xref:eda-set-up-rulebook-activation[Setting up a rulebook activation].
+
+[WARNING]
+====
+Red Hat does not recommend using an unsupported source plugin with a single PostgreSQL database.
+This can pose a potential risk to your use of {PlatformNameShort}.
+====
+
+[IMPORTANT]
+====
+To meet high availability demands, {EDAcontroller} shares centralized link:https://redis.io/[Redis (REmote DIctionary Server)] with the {PlatformNameShort} UI. When Redis is unavailable, the following functions will not be available:
+
+* Creating an activation, if `is_enabled` is True
+* Deleting an activation
+* Enabling an activation, if not already enabled
+* Disabling an activation, if not already disabled
+* Restarting an activation
+====
 
 include::eda/proc-eda-set-up-rulebook-activation.adoc[leveloffset=+1]
 include::eda/con-eda-rulebook-activation-list-view.adoc[leveloffset=+1]
@@ -13,4 +51,4 @@ include::eda/proc-eda-enable-rulebook-activations.adoc[leveloffset=+1]
 include::eda/proc-eda-restart-rulebook-activations.adoc[leveloffset=+1]
 include::eda/proc-eda-delete-rulebook-activations.adoc[leveloffset=+1]
 include::eda/proc-eda-activate-webhook.adoc[leveloffset=+1]
-include::eda/proc-eda-test-with-K8s.adoc[leveloffset=+1]
\ No newline at end of file
+include::eda/proc-eda-test-with-K8s.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/eda/assembly-eda-rulebook-troubleshooting.adoc b/downstream/assemblies/eda/assembly-eda-rulebook-troubleshooting.adoc
new file mode 100644
index 0000000000..ab54930872
--- /dev/null
+++ b/downstream/assemblies/eda/assembly-eda-rulebook-troubleshooting.adoc
@@ -0,0 +1,13 @@
+[id="eda-rulebook-troubleshooting"]
+
+= Rulebook activations troubleshooting
+
+[role="_abstract"]
+
+Occasionally, rulebook activations might fail for a variety of reasons that can be resolved. This section contains a list of possible issues and how you can resolve them.
+
+include::eda/proc-eda-activation-stuck-pending.adoc[leveloffset=+1]
+include::eda/proc-eda-activation-keeps-restarting.adoc[leveloffset=+1]
+include::eda/proc-eda-event-streams-not-sending-events.adoc[leveloffset=+1]
+include::eda/proc-eda-cannot-connect-to-controller.adoc[leveloffset=+1]
+
diff --git a/downstream/assemblies/eda/assembly-eda-set-up-rhaap-credential.adoc b/downstream/assemblies/eda/assembly-eda-set-up-rhaap-credential.adoc
new file mode 100644
index 0000000000..756b6c18af
--- /dev/null
+++ b/downstream/assemblies/eda/assembly-eda-set-up-rhaap-credential.adoc
@@ -0,0 +1,15 @@
+[id="eda-set-up-rhaap-credential-type"]
+
+= {PlatformName} credential
+
+When {EDAcontroller} is deployed on {PlatformNameShort} {PlatformVers}, you can create a {PlatformName} credential to connect to {ControllerName} by using an {ControllerName} URL, username, and password. After it has been created, you can attach the {PlatformName} credential to a rulebook and use it to run rulebook activations.
These credentials provide a simple way to configure communication between {ControllerName} and {EDAcontroller}, enabling your rulebook activations to launch job templates. + +[NOTE] +==== +If you deployed {EDAcontroller} with {PlatformNameShort} 2.4, you probably used controller tokens to connect {ControllerName} and {EDAcontroller}. These controller tokens have been deprecated in {PlatformNameShort} {PlatformVers}. To delete deprecated controller tokens and the rulebook activations associated with them, complete the following procedures starting with xref:replacing-controller-tokens[Replacing controller tokens in {PlatformNameShort} {PlatformVers}] before proceeding with xref:eda-set-up-rhaap-credential[Setting up a {PlatformName} credential]. +==== + +include::eda/con-replacing-controller-tokens.adoc[leveloffset=+1] +include::eda/proc-eda-delete-rulebook-activations-with-cont-tokens.adoc[leveloffset=+2] +include::eda/proc-eda-delete-controller-token.adoc[leveloffset=+2] +include::eda/proc-eda-set-up-rhaap-credential.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/assemblies/eda/assembly-eda-user-guide-overview.adoc b/downstream/assemblies/eda/assembly-eda-user-guide-overview.adoc index eb204aac93..72e0cc2910 100644 --- a/downstream/assemblies/eda/assembly-eda-user-guide-overview.adoc +++ b/downstream/assemblies/eda/assembly-eda-user-guide-overview.adoc @@ -7,16 +7,32 @@ These tools monitor IT solutions and identify events and automatically implement The following procedures form the user configuration: -* xref:eda-set-up-credential[Setting up credentials] -* xref:eda-set-up-new-project[Setting up a new project] -* xref:eda-set-up-new-decision-environment[Setting up a new decision environment] -* xref:eda-set-up-token[Setting up a token to authenticate to {PlatformNameShort} Controller] -* xref:eda-set-up-rulebook-activation[Setting up a rulebook activation] +* xref:eda-credentials[Credentials] +* xref:eda-credential-types[Credential types] +* xref:eda-projects[Projects] +* xref:eda-decision-environments[Decision environments] +* xref:simplified-event-routing[Simplified event routing] +* xref:eda-set-up-rhaap-credential-type[Red Hat Ansible Automation Platform credential] +* xref:eda-rulebook-activations[Rulebook activations] +* xref:eda-rulebook-troubleshooting[Rulebook activations troubleshooting] +* xref:eda-rule-audit[Rule audit] +* xref:eda-performance-tuning[Performance tuning for {EDAcontroller}] +* xref:eda-event-filter-plugins[Event filter plugins] +* xref:eda-logging-strategy[Event-Driven Ansible logging strategy] + [NOTE] +==== +* API documentation for {EDAcontroller} is available at \https:///api/eda/v1/docs +* To meet high availability demands, {EDAcontroller} shares centralized link:https://redis.io/[Redis (REmote DIctionary Server)] with the {PlatformNameShort} UI. When Redis is unavailable, you will not be able to create or sync projects, or enable rulebook activations. ==== -API documentation for {EDAcontroller} is available at \https:///api/eda/v1/docs +[role="_additional-resources"] +.Additional resources +* For information on how to set user permissions for {EDAcontroller}, see the following in the link:{URLCentralAuth}/index[Access management and authentication guide]: -==== +. link:{URLCentralAuth}/gw-managing-access#ref-controller-user-roles[Adding roles for a user] +. 
link:{URLCentralAuth}/assembly-gw-roles[Roles]
+
+* If you plan to use {EDAName} 2.5 with a 2.4 {PlatformNameShort}, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/using_event-driven_ansible_2.5_with_ansible_automation_platform_2.4/index[Using Event-Driven Ansible 2.5 with Ansible Automation Platform 2.4].
diff --git a/downstream/assemblies/eda/assembly-simplified-event-routing.adoc b/downstream/assemblies/eda/assembly-simplified-event-routing.adoc
new file mode 100644
index 0000000000..f3282408f1
--- /dev/null
+++ b/downstream/assemblies/eda/assembly-simplified-event-routing.adoc
@@ -0,0 +1,23 @@
+
+[id="simplified-event-routing"]
+
+= Simplified event routing
+
+Simplified event routing enables {EDAcontroller} to capture and analyze data from various remote systems using event streams. With event streams, you can send events from a remote system, such as GitHub or GitLab, into {EDAcontroller}. You can attach one or more event streams to an activation by swapping out sources in a rulebook.
+
+Event streams are an easy way to connect your sources to your rulebooks. This capability lets you create a single endpoint to receive alerts from an event source and then use the events in multiple rulebooks.
+
+include::eda/con-event-streams.adoc[leveloffset=+1]
+include::eda/proc-eda-create-event-stream-credential.adoc[leveloffset=+1]
+include::eda/proc-eda-create-event-stream.adoc[leveloffset=+1]
+include::eda/proc-eda-config-remote-sys-to-events.adoc[leveloffset=+1]
+include::eda/proc-eda-verify-event-streams-work.adoc[leveloffset=+1]
+include::eda/proc-eda-replace-sources-with-event-streams.adoc[leveloffset=+1]
+include::eda/proc-eda-resend-webhook-data-event-streams.adoc[leveloffset=+1]
+include::eda/proc-eda-check-rule-audit-event-stream.adoc[leveloffset=+1]
+
+
+
+
+
+
diff --git a/downstream/assemblies/hub/assembly-container-user-access.adoc b/downstream/assemblies/hub/assembly-container-user-access.adoc
index fe4cb7fe88..475826ac8c 100644
--- a/downstream/assemblies/hub/assembly-container-user-access.adoc
+++ b/downstream/assemblies/hub/assembly-container-user-access.adoc
@@ -9,17 +9,17 @@ ifdef::context[:parent-context: {context}]
 
 :context: configuring-user-access-containers
 
 [role="_abstract"]
-To determine who can access and manage images in your {PlatformNameShort}, you must configure user access for container repositories in your {PrivateHubName}.
+To determine who can access and manage {ExecEnvShort}s in your {PlatformNameShort}, you must configure user access for container repositories in your {PrivateHubName}.
 
 include::hub/ref-container-permissions.adoc[leveloffset=+1]
 include::hub/proc-create-groups.adoc[leveloffset=+1]
-include::hub/proc-assigning-permissions.adoc[leveloffset=+1]
+// [hherbly]: proc-assigning-permissions seems to repeat proc-create-groups.adoc include::hub/proc-assigning-permissions.adoc[leveloffset=+1]
 
-.Additional resources
+// .Additional resources
 
-* See <> to learn more about specific permissions.
+// [hherbly] LINK SHOULD BE REPLACED when we find a better one * See <> to learn more about specific permissions.
-include::hub/proc-add-user-to-group.adoc[leveloffset=+1] +// [hherbly]: this module also seems redundant include::hub/proc-add-user-to-group.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] diff --git a/downstream/assemblies/hub/assembly-delete-container.adoc b/downstream/assemblies/hub/assembly-delete-container.adoc index 125111bd39..b8fac233dd 100644 --- a/downstream/assemblies/hub/assembly-delete-container.adoc +++ b/downstream/assemblies/hub/assembly-delete-container.adoc @@ -6,21 +6,21 @@ ifdef::context[:parent-context: {context}] :context: delete-container [role="_abstract"] -Delete a container repository from your {PrivateHubName} to manage your disk space. -You can delete repositories from the {PlatformName} interface in the *Container Repository* list view. +Delete a remote repository from your {PlatformNameShort} to manage your disk space. +You can delete repositories from the {PlatformName} interface in the *Execution Environment* list view. .Prerequisites * You have permissions to manage repositories. .Procedure -. Navigate to {HubName}. +. Log in to {PlatformNameShort}. . From the navigation panel, select {MenuACExecEnvironments}. -. On the container repository that you want to delete, click the btn:[More Actions] icon *{MoreActionsIcon}*, and click btn:[Delete]. -. When the confirmation message is displayed, click the checkbox and click btn:[Delete]. +. On the container repository that you want to delete, click the btn:[More Actions] icon *{MoreActionsIcon}*, and click btn:[Delete {ExecEnvShort}]. +. When the confirmation message is displayed, click the checkbox and click btn:[Delete {ExecEnvShort}]. .Verification -* Return to the *Execution Environments* list view. -If the container repository has been successfully deleted, the container repository is no longer on the list. +* Return to the *{ExecEnvName}* list view. +If the {ExecEnvName} has been successfully deleted, it will no longer be in the list. ifdef::parent-context[:context: {parent-context}] diff --git a/downstream/assemblies/hub/assembly-hub-create-api-token.adoc b/downstream/assemblies/hub/assembly-hub-create-api-token.adoc index e54c0c2a07..0f4433fcce 100644 --- a/downstream/assemblies/hub/assembly-hub-create-api-token.adoc +++ b/downstream/assemblies/hub/assembly-hub-create-api-token.adoc @@ -10,9 +10,11 @@ Before you can interact with {HubName} by uploading or downloading collections, Your method for creating the API token differs according to the type of {HubName} that you are using: -* {HubNameStart} uses Offline token management. See xref:proc-create-api-token[Creating the API token in {HubName}]. +* {HubNameStart} uses offline token management. See xref:proc-create-api-token_api-token[Creating the offline token in {HubName}]. -* {PrivateHubNameStart} uses API token management. See xref:proc-create-api-token-pah[Creating the API token in {PrivateHubName}]. +* {PrivateHubNameStart} uses API token management. See xref:proc-create-api-token-pah_api-token[Creating the API token in {PrivateHubName}]. + +* If you are using Keycloak to authenticate your {PrivateHubName}, follow the procedure for xref:proc-create-api-token_api-token[Creating the offline token in {HubName}]. 
include::hub/proc-create-api-token.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/hub/assembly-managing-cert-valid-content.adoc b/downstream/assemblies/hub/assembly-managing-cert-valid-content.adoc
index 83abc0cc67..bdee3ed2f7 100644
--- a/downstream/assemblies/hub/assembly-managing-cert-valid-content.adoc
+++ b/downstream/assemblies/hub/assembly-managing-cert-valid-content.adoc
@@ -3,62 +3,71 @@ ifdef::context[:parent-context: {context}]
 [id="managing-cert-valid-content"]
 = Red Hat Certified, validated, and Ansible Galaxy content in automation hub
-:context: managing-cert-validated-content
+:context: cloud-sync
 
 [role="_abstract"]
-{CertifiedName} are included in your subscription to {PlatformName}. Red Hat Ansible content includes two types of content: {CertifiedName} and {Valid}.
-Using {HubNameMain}, you can access and curate a unique set of collections from all forms of Ansible content.
+{CertifiedName} are included in your subscription to {PlatformName}. Using {HubNameMain}, you can access and curate a unique set of collections from all forms of Ansible content. Red Hat Ansible content contains two types of content:
 
 * {CertifiedName}
 * {Valid} collections
 
-Ansible validated collections are available in your {PrivateHubName} through the Platform Installer.
-When you download {PlatformName} with the bundled installer, validated content is pre-populated into the {PrivateHubName} by default, but only if you enable the {PrivateHubName} as part of the inventory.
+You can use both {CertifiedName} and {Valid} collections to build your automation library. For more information on the differences between {CertifiedName} and {Valid} collections, see the Knowledgebase article link:https://access.redhat.com/support/articles/ansible-automation-platform-certified-content[{CertifiedName} and {Valid}], or xref:assembly-validated-content[{Valid}] in this guide.
 
-If you are not using the bundle installer, you can use a Red Hat supplied Ansible playbook to install validated content.
-For further information, see xref:assembly-validated-content[{Valid}].
+// hherbly--removed, see aap-20548
+// Ansible validated collections are available in your {PrivateHubName} through the platform installer.
+// When you download {PlatformName} with the bundled installer, validated content is pre-populated into the {PrivateHubName} by default, but only if you enable the {PrivateHubName} as part of the inventory.
+
+// If you are not using the bundle installer, you can use a Red Hat supplied Ansible playbook to install validated content.
+
+// For further information, see xref:assembly-validated-content[{Valid}].
 
 You can update these collections manually by downloading their packages.
 
-[discrete]
-== Why certify Ansible collections?
+//hherbly: removing as this is specific to partners, not a general user audience. see aap-20548
+
+// [discrete]
+// == Why certify Ansible collections?
+
+// The Ansible certification program represents a shared statement of support for {CertifiedCon} between Red Hat and the ecosystem partner.
+// An end customer experiencing trouble with Ansible and certified partner content can, for example, open a support ticket describing a request for information, or a problem with Red Hat, and expect the ticket to be resolved by Red Hat and the ecosystem partner.
+
+// Red Hat offers go-to-market benefits for Certified Partners to grow market awareness, generate demand, and sell collaboratively.
+ +// Red Hat {CertifiedName} are distributed through {HubNameMain} (subscription required), a centralized repository for jointly supported Ansible Content. +// As a certified partner, publishing collections to {HubNameMain} gives end customers the power to manage how trusted automation content is used in their production environment with a well-known support life cycle. + +// For more information about getting started with certifying a solution, see link:https://connect.redhat.com/en/partner-with-us/red-hat-ansible-automation-certification[Red Hat Partner Resources]. -The Ansible certification program enables a shared statement of support for {CertifiedCon} between Red Hat and the ecosystem partner. -An end customer, experiencing trouble with Ansible and certified partner content, can open a support ticket, for example, a request for information, or a problem with Red Hat, and expect the ticket to be resolved by Red Hat and the ecosystem partner. +// [discrete] +// == How do I get a collection certified? -Red Hat offers go-to-market benefits for Certified Partners to grow market awareness, generate demand, and sell collaboratively. +// For instructions on certifying your collection, see the Ansible certification policy guide on link:http://www.ansible.com/partners[Red Hat Partner Connect]. -Red Hat {CertifiedName} are distributed through {HubNameMain} (subscription required), a centralized repository for jointly supported Ansible Content. -As a certified partner, publishing collections to {HubNameMain} provides end customers the power to manage how trusted automation content is used in their production environment with a well-known support life cycle. +// [discrete] +// == How does the joint support agreement on Certified Collections work? -For more information about getting started with certifying a solution, see link:https://connect.redhat.com/en/partner-with-us/red-hat-ansible-automation-certification[Red Hat Partner Connect]. +// If a customer raises an issue with the Red Hat support team about a certified collection, Red Hat support assesses the issue and checks whether the problem is with Ansible or Ansible usage. +// They also check whether the issue is with a certified collection. +// If there is a problem with the certified collection, support teams transfer the issue to the vendor owner of the certified collection through an agreed-upon tool such as TSANet. -[discrete] -== How do I get a collection certified? +// [discrete] +// == Can I create and certify a collection containing only Ansible Roles? -For instructions on certifying your collection, see the Ansible certification policy guide on link:http://www.ansible.com/partners[Red Hat Partner Connect]. +// You can create and certify collections that contain only roles. +// Current testing requirements are focused on collections containing modules, and additional resources are currently in progress for testing collections containing only roles. +// Contact ansiblepartners@redhat.com for more information. -[discrete] -== How does the joint support agreement on Certified Collections work? +You can use {HubNameMain} to distribute the relevant {CertifiedColl}s to your users by creating a requirements file or a synclist. Use a requirements file to install collections to your {HubName}, as synclists can only be managed by users with platform administrator privileges. 
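+
+A requirements file is a YAML file that lists the collections to install or synchronize. The following is a minimal sketch; the collection names and the version constraint are illustrative examples:
+
+----
+# requirements.yml: collections to install from the configured remote
+collections:
+  - name: ansible.controller        # example collection name
+    version: ">=4.5.0"              # optional version constraint
+  - name: redhat.rhel_system_roles  # example collection name
+----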
-If a customer raises an issue with the Red Hat support team about a certified collection, Red Hat support assesses the issue and checks whether the problem exists within Ansible or Ansible usage.
-They also check whether the issue is with a certified collection.
-If there is a problem with the certified collection, support teams transfer the issue to the vendor owner of the certified collection through an agreed upon tool such as TSANet.
+Before you can use a requirements file to install content, you must:
 
-[discrete]
-== Can I create and certify a collection containing only Ansible Roles?
+. xref:token-management-hub_cloud-sync[Obtain an automation hub API token]
+. xref:proc-set-rhcertified-remote_cloud-sync[Use the API token to configure a remote repository in your local hub]
+. xref:create-requirements-file_cloud-sync[Create a requirements file].
 
-You can create and certify collections that contain only roles.
-Current testing requirements are focused on collections containing modules, and additional resources are currently in progress for testing collections only containing roles.
-Contact ansiblepartners@redhat.com for more information.
-include::assembly-working-with-namespaces.adoc[leveloffset=+1] -include::assembly-managing-private-collections.adoc[leveloffset=+1] -include::assembly-repo-management.adoc[leveloffset=+1] -include::assembly-remote-management.adoc[leveloffset=+2] -include::assembly-repo-sync.adoc[leveloffset=+2] -include::assembly-collection-import-export.adoc[leveloffset=+2] ifdef::parent-context[:context: {parent-context}] ifndef::parent-context[:!context:] diff --git a/downstream/assemblies/hub/assembly-managing-container-registry.adoc b/downstream/assemblies/hub/assembly-managing-container-registry.adoc index 4f8eb57a16..d9eb675c12 100644 --- a/downstream/assemblies/hub/assembly-managing-container-registry.adoc +++ b/downstream/assemblies/hub/assembly-managing-container-registry.adoc @@ -2,14 +2,13 @@ ifdef::context[:parent-context: {context}] - [id="managing-container-registry"] -= Manage your {PrivateHubName} container registry += Manage your {PrivateHubName} remote registry :context: managing-container-registry [role="_abstract"] -Manage container image repositories in your {PlatformNameShort} infrastructure by using the {HubName} container registry. +Manage container image repositories in your {PlatformNameShort} infrastructure by using the {HubName} remote registry. You can perform the following tasks with {HubNameStart}: * Control who can access individual container repositories @@ -17,8 +16,6 @@ You can perform the following tasks with {HubNameStart}: * View activity and image layers * Provide additional information related to each container repository - - //// The following include statements pull in the module files that comprise the assembly. Include any combination of concept, procedure, or reference modules required to cover the user story. You can also include other assemblies. //// diff --git a/downstream/assemblies/hub/assembly-managing-containers-hub.adoc b/downstream/assemblies/hub/assembly-managing-containers-hub.adoc index 97a22e8482..f6d6ab33c2 100644 --- a/downstream/assemblies/hub/assembly-managing-containers-hub.adoc +++ b/downstream/assemblies/hub/assembly-managing-containers-hub.adoc @@ -6,15 +6,8 @@ ifdef::context[:parent-context: {context}] :context: managing-containers [role="_abstract"] -Learn the administrator workflows and processes for configuring {PrivateHubName} container registry and repositories. +Learn the administrator workflows and processes for configuring the {PrivateHubName} remote registry and repositories. -include::assembly-managing-container-registry.adoc[leveloffset=+1] -include::assembly-container-user-access.adoc[leveloffset=+1] -include::assembly-populate-container-registry.adoc[leveloffset=+1] -include::assembly-setup-container-repository.adoc[leveloffset=+1] -include::assembly-pull-image.adoc[leveloffset=+1] -include::assembly-working-with-signed-containers.adoc[leveloffset=+1] -include::assembly-delete-container.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] ifndef::parent-context[:!context:] diff --git a/downstream/assemblies/hub/assembly-managing-private-collections.adoc b/downstream/assemblies/hub/assembly-managing-private-collections.adoc index d27fe5926c..720329e5c9 100644 --- a/downstream/assemblies/hub/assembly-managing-private-collections.adoc +++ b/downstream/assemblies/hub/assembly-managing-private-collections.adoc @@ -4,7 +4,7 @@ Use {HubName} to manage and publish content collections developed within your organization. You can upload and group collections in namespaces. 
They need administrative approval to appear in the *Published* content repository. After you publish a collection, your users can access and download it for use.
 
-You can reject submitted collections that do not meet organizational certification criteria.
+You can also reject submitted collections that do not meet organizational certification criteria.
 
 include::hub/con-approval.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/hub/assembly-populate-container-registry.adoc b/downstream/assemblies/hub/assembly-populate-container-registry.adoc
index 31d00049c2..6ef563750c 100644
--- a/downstream/assemblies/hub/assembly-populate-container-registry.adoc
+++ b/downstream/assemblies/hub/assembly-populate-container-registry.adoc
@@ -10,27 +10,43 @@ ifdef::context[:parent-context: {context}]
 
 [role="_abstract"]
-By default, {PrivateHubName} does not include container images.
-To populate your container registry, you must push a container image to it.
+By default, {PrivateHubName} does not include {ExecEnvName}.
+To populate your container registry, you must push an {ExecEnvShort} to it.
 
-You must follow a specific workflow to populate your {PrivateHubName} container registry:
+You must follow a specific workflow to populate your {PrivateHubName} remote registry:
 
-* Pull images from the Red Hat Ecosystem Catalog (registry.redhat.io)
+* Pull {ExecEnvName} from the Red Hat Ecosystem Catalog (registry.redhat.io)
 * Tag them
-* Push them to your {PrivateHubName} container registry
+* Push them to your {PrivateHubName} remote registry
 
 [IMPORTANT]
 ====
-Image manifests and filesystem blobs were both originally served directly from `registry.redhat.io` and `registry.access.redhat.com`.
-As of 1 May 2023, filesystem blobs are served from `quay.io` instead.
+As of *April 1, 2025*, `quay.io` is adding three additional endpoints. As a result, customers must adjust the allow/block lists within their firewall systems to include the following endpoints:
 
-* Ensure that the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/ref-network-ports-protocols_planning[Network ports and protocols] listed in _Table 5.10. Execution Environments (EE)_ are available to avoid problems pulling container images.
+* `cdn04.quay.io`
+* `cdn05.quay.io`
+* `cdn06.quay.io`
 
-Make this change to any firewall configuration that specifically enables outbound connections to `registry.redhat.io` or `registry.access.redhat.com`.
+To avoid problems pulling container images, customers must allow outbound TCP connections (ports 80 and 443) to the following hostnames:
 
-Use the hostnames instead of IP addresses when configuring firewall rules.
+* `cdn.quay.io`
+* `cdn01.quay.io`
+* `cdn02.quay.io`
+* `cdn03.quay.io`
+* `cdn04.quay.io`
+* `cdn05.quay.io`
+* `cdn06.quay.io`
+
+This change should be made to any firewall configuration that specifically enables outbound connections to `registry.redhat.io` or `registry.access.redhat.com`.
+
+Use the hostnames instead of IP addresses when configuring firewall rules.
+
+After making this change, you can continue to pull images from `registry.redhat.io` or `registry.access.redhat.com`. You do not require a `quay.io` login, or need to interact with the `quay.io` registry directly in any way to continue pulling Red Hat container images.
+
+For more information, see link:https://access.redhat.com/articles/7084334[Firewall changes for container image pulls 2024/2025].
+
+Ensure that the link:{URLPlanningGuide}/ref-network-ports-protocols_planning[Network ports and protocols] listed in _Table 6.4. Execution Environments (EE)_ are available to avoid problems pulling container images.
-After making this change you can continue to pull images from `registry.redhat.io` and `registry.access.redhat.com`. You do not require a `quay.io` login, or need to interact with the `quay.io` registry directly in any way to continue pulling Red Hat container images.
 ====
 
 include::hub/proc-obtain-images.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/hub/assembly-pull-image.adoc b/downstream/assemblies/hub/assembly-pull-image.adoc
index dad521a92f..97b1ac355d 100644
--- a/downstream/assemblies/hub/assembly-pull-image.adoc
+++ b/downstream/assemblies/hub/assembly-pull-image.adoc
@@ -6,9 +6,9 @@ ifdef::context[:parent-context: {context}]
 :context: pulling-images-container-repository
 
 [role="_abstract"]
-Pull images from the {HubName} container registry to make a copy to your local machine.
-{HubNameStart} provides the `podman pull` command for each `latest` image in the container repository.
-You can copy and paste this command into your terminal, or use `podman pull` to copy an image based on an image tag.
+Pull {ExecEnvName} from the {HubName} remote registry to make a copy on your local machine.
+{HubNameStart} provides the `podman pull` command for each `latest` {ExecEnvName} in the container repository.
+You can copy and paste this command into your terminal, or use `podman pull` to copy an {ExecEnvName} based on a tag.
 
 include::hub/proc-pull-image.adoc[leveloffset=+1]
 include::hub/proc-sync-image.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/hub/assembly-repo-management.adoc b/downstream/assemblies/hub/assembly-repo-management.adoc
index 220acc8950..e98835d1c0 100644
--- a/downstream/assemblies/hub/assembly-repo-management.adoc
+++ b/downstream/assemblies/hub/assembly-repo-management.adoc
@@ -6,7 +6,7 @@ ifdef::context[:parent-context: {context}]
 :context: repo-management
 
 [role="_abstract"]
-As an {HubName} administrator, you can create, edit, delete, and move automation content collections between repositories.
+As a platform administrator, you can create, edit, delete, and move automation content collections between repositories.
 
 == Types of repositories in automation hub
 
 In {HubName} you can publish collections to two types of repositories, depending
 
 Staging repositories:: Any user with permission to upload to a namespace can publish collections into these repositories. Collections in these repositories are not available in the search page. Instead, they are displayed on the approval dashboard for an administrator to verify. Staging repositories are marked with the `pipeline=staging` label.
-Custom repositories:: Any user with write permissions on the repository can publish collections to these repositories. Custom repositories can be public where all users can see them, or private where only users with view permissions can see them. These repositories are not displayed on the approval dashboard. If the repository owner enables search, the collection can appear in search results.
+Custom repositories:: Any user with write permissions on the repository can publish collections to these repositories. Custom repositories can be public where all users can see them, or private where only users with view permissions can see them. These repositories are not displayed on the approval dashboard. 
If the repository owner enables search, the collection can appear in search results. -By default, {HubName} ships with one staging repository that is automatically used when a repository is not specified for uploading collections. Users can create new staging repositories during xref:proc-create-repository[repository creation]. +By default, {HubName} includes one staging repository that is automatically used when a repository is not specified for uploading collections. Users can create new staging repositories during xref:proc-create-repository[repository creation]. include::hub/con-approval-pipeline.adoc[leveloffset=+1] include::hub/con-repo-rbac.adoc[leveloffset=+1] diff --git a/downstream/assemblies/hub/assembly-setup-container-repository.adoc b/downstream/assemblies/hub/assembly-setup-container-repository.adoc index 09af70eba6..62516cdbb4 100644 --- a/downstream/assemblies/hub/assembly-setup-container-repository.adoc +++ b/downstream/assemblies/hub/assembly-setup-container-repository.adoc @@ -7,17 +7,15 @@ ifdef::context[:parent-context: {context}] [id="setting-up-container-repository"] = Setting up your container repository - :context: assembly-keyword - [role="_abstract"] -When you set up your container repository, you must add a description, include a README, add groups that can access the repository, and tag images. +When you set up your container repository, you must add a description, include a README, add teams that can access the repository, and tag {ExecEnvName}. -== Prerequisites to setting up your container registry +== Prerequisites to setting up your remote registry -* You are logged in to a {PrivateHubName}. +* You are logged in to {PlatformNameShort}. * You have permissions to change the repository. diff --git a/downstream/assemblies/hub/assembly-syncing-to-cloud-repo.adoc b/downstream/assemblies/hub/assembly-syncing-to-cloud-repo.adoc index 7b3ffb4ba2..3a86d82d40 100644 --- a/downstream/assemblies/hub/assembly-syncing-to-cloud-repo.adoc +++ b/downstream/assemblies/hub/assembly-syncing-to-cloud-repo.adoc @@ -1,26 +1,29 @@ [id="assembly-creating-tokens-in-automation-hub"] = Configuring {HubNameMain} remote repositories to synchronize content +:context: cloud-sync + Use remote configurations to configure your {PrivateHubName} to synchronize with {CertifiedName} hosted on `{Console}` or with your collections in {Galaxy}. [IMPORTANT] ==== -As of the 2.4 release you can still synchronize content, but synclists are deprecated, and will be removed in a future version. +To synchronize content, you can now upload a manually-created requirements file from the rh-certified remote. Remotes are configurations that allow you to synchronize content to your custom repositories from an external collection source. -To synchronize content, you can now upload a manually-created requirements file from the rh-certified remote. - -Remotes are configurations that allow you to synchronize content to your custom repositories from an external collection source. +As of the 2.4 release you can still synchronize content, but synclists are deprecated, and will be removed in a future version. ==== +Each remote configuration located in {MenuACAdminRemotes} provides information for both the *community* and *rh-certified* repository about when the repository was *last updated*. +You can add new content to {HubNameMain} at any time using the *Edit* and *Sync* features included on the {MenuACAdminRepositories} page. + [discrete] -== What’s the difference between {Galaxy} and {HubNameMain}? 
+== What's the difference between {Galaxy} and {HubNameMain}? Collections published to {Galaxy} are the latest content published by the Ansible community and have no joint support claims associated with them. -{Galaxy} is the recommended frontend directory for the Ansible community accessing content. +{Galaxy} is the recommended frontend directory for the Ansible community to access content. -Collections published to {HubNameMain} are targeted for joint customers of Red Hat and selected partners. +Collections published to {HubNameMain} are targeted to joint customers of Red Hat and selected partners. Customers need an Ansible subscription to access and download collections on {HubNameMain}. -A certified collection means that Red Hat and partners have a strategic relationship in place and are ready to support joint customers, and may have had additional testing and validation done against them. +A certified collection means that Red Hat and partners have a strategic relationship in place and are ready to support joint customers, and that the collections may have had additional testing and validation done against them. [discrete] == How do I request a namespace on {Galaxy}? @@ -37,16 +40,23 @@ After users are added as administrators of the namespace, you can use the self-s [discrete] == Are there any restrictions for {Galaxy} namespace naming? -Collection namespaces must follow python module name convention. +Collection namespaces must follow Python module name convention. This means collections should have short, all lowercase names. You can use underscores in the collection name if it improves readability. -include::hub/con-remote-repos.adoc[leveloffset=+1] +// [hherbly: there's only a couple of sentences in this concept module, and they make more sense at the beginning of this assembly. Moving this content to line 15] include::hub/con-remote-repos.adoc[leveloffset=+1] + +// [hherbly: replacing this with the 4 modules below from the Getting started with hub guide include::hub/proc-obtaining-org-collection-url.adoc[leveloffset=+1] -include::hub/proc-obtaining-org-collection-url.adoc[leveloffset=+1] +include::hub/con-token-management-hub.adoc[leveloffset=+1] +include::hub/proc-create-api-token.adoc[leveloffset=+1] +include::hub/proc-create-api-token-pah.adoc[leveloffset=+1] +include::hub/con-offline-token-active.adoc[leveloffset=+1] include::hub/proc-set-rhcertified-remote.adoc[leveloffset=+1] include::hub/proc-set-community-remote.adoc[leveloffset=+1] include::hub/proc-configure-proxy-remote.adoc[leveloffset=+1] + +include::hub/proc-create-requirements-file.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/assemblies/hub/assembly-synclists.adoc b/downstream/assemblies/hub/assembly-synclists.adoc index 6b2a651c7e..be1694bfe2 100644 --- a/downstream/assemblies/hub/assembly-synclists.adoc +++ b/downstream/assemblies/hub/assembly-synclists.adoc @@ -3,14 +3,11 @@ [IMPORTANT] ==== -As of the 2.4 release you can still synchronize content, but synclists are deprecated, and will be removed in a future version. - To synchronize content, you can now upload a manually-created requirements file from the rh-certified remote. - Remotes are configurations that enable you to synchronize content to your custom repositories from an external collection source. -==== -You can use {HubNameMain} to distribute the relevant {CertifiedColl}s to your users by creating synclists or a requirements file. 
For more information about using requirements files, see link:https://docs.ansible.com/ansible/latest/collections_guide/collections_installing.html#install-multiple-collections-with-a-requirements-file[Install multiple collections with a requirements file] in the _Using Ansible collections_ guide.
+As of the 2.4 release you can still synchronize content, but synclists are deprecated, and will be removed in a future version.
+====
 
 include::hub/con-rh-certified-synclist.adoc[leveloffset=+1]
 include::hub/proc-create-synclist.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/hub/assembly-validated-content.adoc b/downstream/assemblies/hub/assembly-validated-content.adoc
index e19d2615eb..5afc6337d6 100644
--- a/downstream/assemblies/hub/assembly-validated-content.adoc
+++ b/downstream/assemblies/hub/assembly-validated-content.adoc
@@ -7,58 +7,63 @@
 
 == Configuring validated collections with the installer
 
-When you download and run the bundle installer, certified and validated collections are automatically uploaded.
+When you download and run the RPM bundle installer, certified and validated collections are automatically uploaded.
 Certified collections are uploaded into the `rh-certified` repository.
 Validated collections are uploaded into the `validated` repository.
 
-You can change to default configuration by using two variables:
+You can change the default configuration by using two variables:
 
 * `automationhub_seed_collections` is a boolean that defines whether or not preloading is enabled.
-* `automationhub_collection_seed_repository`. A variable that enables you to specify the type of content to upload when it is set to `true`.
+* `automationhub_collection_seed_repository` is a variable that enables you to specify the type of content to upload when it is set to `true`.
 Possible values are `certified` or `validated`.
-If missing both content sets will be uploaded.
+If this variable is missing, both content sets are uploaded.
 
-== Installing validated content using the tarball
-
-If you are not using the bundle installer, you can use a standalone tarball, `ansible-validated-content-bundle-1.tar.gz`.
-You can also use this standalone tarball later to update validated contents in any environment, when a newer tarball becomes available, without having to re-run the bundle installer.
+[NOTE]
+====
+Changing the default configuration might require further platform configuration changes for other content that you use.
+====
 
-.Prerequisites
-You require the following variables to run the playbook.
+// == Installing validated content using the tarball
 
-[cols="50%,50%",options="header"]
-|====
-| Name | Description
-| *`automationhub_admin_password`* | Your administration password.
-| *`automationhub_api_token`* | The API token generated for your {HubName}.
-| *`automationhub_main_url`* | For example, `\https://automationhub.example.com`
-| *`automationhub_require_content_approval`* | Boolean (`true` or `false`)
+// If you are not using the bundle installer, you can use a standalone .tar file, `ansible-validated-content-bundle-1.tar.gz`.
+// You can also use this standalone .tar file later to update validated contents in any environment, when a newer .tar file becomes available, without having to re-run the bundle installer.
 
-This must match the value used during {HubName} deployment.
+// .Prerequisites
+// Use the following required variables to run the playbook.
 
-This variable is set to `true` by the installer.
-|==== +// [cols="50%,50%",options="header"] +// |==== +// | Name | Description +// | *`automationhub_admin_password`* | Your administration password. +// | *`automationhub_api_token`* | The API token generated for your {HubName}. +// | *`automationhub_main_url`* | For example, `\https://automationhub.example.com` +// | *`automationhub_require_content_approval`* | Boolean (`true` or `false`) +// +// This must match the value used during {HubName} deployment. +// +// This variable is set to `true` by the installer. +// |==== -.Procedure -. To obtain the tarball, navigate to the link:{PlatformDownloadUrl}[{PlatformName} download] page and select *Ansible Validated Content*. -. Upload the content and define the variables (this example uses `automationhub_api_token`): -+ -[options="nowrap" subs="+quotes,attributes"] ----- -ansible-playbook collection_seed.yml --e automationhub_api_token= --e automationhub_main_url=https://automationhub.example.com --e automationhub_require_content_approval=true ----- -+ -[NOTE] -==== -Use either `automationhub_admin_password` or `automationhub_api_token`, not both. -==== +// .Procedure +// . To obtain the .tar file, navigate to the link:{PlatformDownloadUrl}[{PlatformName} download] page and select // *Ansible Validated Content*. +// . Upload the content and define the variables (this example uses `automationhub_api_token`): +// + +// [options="nowrap" subs="+quotes,attributes"] +// ---- +// ansible-playbook collection_seed.yml +// -e automationhub_api_token= +// -e automationhub_main_url=https://automationhub.example.com +// -e automationhub_require_content_approval=true +// ---- +// + +// [NOTE] +// ==== +// Use either `automationhub_admin_password` or `automationhub_api_token`, not both. +// ==== -When complete, the collections are visible in the validated collection section of {PrivateHubName}. -Users can now view and download collections from your {PrivateHubName}. +// When complete, the collections are visible in the validated collection section of {PrivateHubName}. +// Users can now view and download collections from your {PrivateHubName}. -[role="_additional-resources"] -.Additional Resources -For more information on running ansible playbooks, see link:https://docs.ansible.com/ansible/latest/cli/ansible-playbook.html[ansible-playbook]. +// [role="_additional-resources"] +// .Additional Resources +// For more information on running ansible playbooks, see link:https://docs.ansible.com/ansible/latest/cli/ansible-playbook.html[ansible-playbook]. diff --git a/downstream/assemblies/hub/assembly-working-with-namespaces.adoc b/downstream/assemblies/hub/assembly-working-with-namespaces.adoc index 42e77c6a17..8c72dd9b26 100644 --- a/downstream/assemblies/hub/assembly-working-with-namespaces.adoc +++ b/downstream/assemblies/hub/assembly-working-with-namespaces.adoc @@ -2,11 +2,11 @@ = Using namespaces to manage collections in {HubName} -Namespaces are unique locations in {HubName} to which you can upload and publish content collections. Access to namespaces in {HubName} is governed by groups with permission to manage the content and related information that appears there. +Namespaces are unique locations in {HubName} to which you can upload and publish content collections. Access to namespaces in {HubName} is governed by teams with permission to manage the content and related information that appears there. You can use namespaces in {HubName} to organize collections developed within your organization for internal distribution and use. 
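+
+For example, after a namespace exists and you have been granted upload permissions on it, you can push a built collection to it from the command line. The following is a minimal sketch; the namespace, file name, hub URL, and token are placeholder values, not part of this procedure:
+
+----
+# Build the collection from its source directory, then publish it to the namespace.
+$ ansible-galaxy collection build
+$ ansible-galaxy collection publish my_namespace-my_collection-1.0.0.tar.gz \
+    --server https://hub.example.com/api/galaxy/ \
+    --token <api_token>
+----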
-If you are working with namespaces, you must have a group that has permissions to create, edit and upload collections to namespaces. Collections uploaded to a namespace require administrative approval before you can publish them and make them available for use. +If you are working with namespaces, you must have a team that has permissions to create, edit, and upload collections to namespaces. Collections uploaded to a namespace require administrative approval before you can publish them and make them available for use. include::hub/proc-create-content-developers.adoc[leveloffset=+1] @@ -14,7 +14,7 @@ include::hub/proc-create-namespace.adoc[leveloffset=+1] include::hub/proc-edit-namespace.adoc[leveloffset=+1] -When you create a namespace, groups with permissions to upload to it can start adding their collections for approval. Collections in the namespace appear in the *Published* repository after approval. +When you create a namespace, teams with permissions to upload to it can start adding their collections for approval. Collections in the namespace appear in the *Published* repository after approval. include::hub/proc-uploading-collections.adoc[leveloffset=+1] diff --git a/downstream/assemblies/hub/assembly-working-with-signed-containers.adoc b/downstream/assemblies/hub/assembly-working-with-signed-containers.adoc index b622914bff..c0d0485de2 100644 --- a/downstream/assemblies/hub/assembly-working-with-signed-containers.adoc +++ b/downstream/assemblies/hub/assembly-working-with-signed-containers.adoc @@ -9,7 +9,7 @@ ifdef::context[:parent-context: {context}] :context: working-with-signed-containers -{ExecEnvNameStart} are container images used by Ansible {ControllerName} to run jobs. +{ExecEnvNameStart} are container images used by {PlatformNameShort} to run jobs. You can download this content to {PrivateHubName}, and publish it within your organization. include::hub/proc-deploying-your-system-for-container-signing.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-HA-redis.adoc b/downstream/assemblies/platform/assembly-HA-redis.adoc new file mode 100644 index 0000000000..5691865fdd --- /dev/null +++ b/downstream/assemblies/platform/assembly-HA-redis.adoc @@ -0,0 +1,29 @@ +:_mod-docs-content-type: ASSEMBLY + +[id="HA-redis_{context}"] + += Caching and queueing system + +In {PlatformNameShort} {PlatformVers}, link:https://redis.io/[Redis (REmote DIctionary Server)] is used as the caching and queueing system. Redis is an open source, in-memory, NoSQL key/value store that is used primarily as an application cache, quick-response database, and lightweight message broker. + +Centralized Redis is provided for the {Gateway} and {EDAName} and shared between those components. {ControllerNameStart} and {HubName} have their own instances of Redis. + +This cache and queue system stores data in memory, rather than on a disk or solid-state drive (SSD), which helps deliver speed, reliability, and performance. In {PlatformNameShort}, the system caches the following types of data for the various services in {PlatformNameShort}: + +.Data types cached by Centralized Redis +[options="header"] +|==== +| {ControllerNameStart} | {EDAName} server | {HubNameStart} | {GatewayStart} +| N/A {ControllerName} does not use shared Redis in {PlatformNameShort} {PlatformVers} | Event queues | N/A {HubName} does not use shared Redis in {PlatformNameShort} {PlatformVers} | Settings, Session Information, JSON Web Tokens +|==== + +This data can contain sensitive Personally Identifiable Information (PII).
Your data is protected through secure communication with the cache and queue system, which uses both Transport Layer Security (TLS) encryption and authentication. + +[NOTE] +==== +The data in Redis from both the {Gateway} and {EDAName} is partitioned; therefore, neither service can access the other’s data. +==== + +include::platform/con-gw-centralized-redis.adoc[leveloffset=+1] +include::platform/con-gw-clustered-redis.adoc[leveloffset=+1] +include::platform/con-gw-single-node-redis.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-aap-activate.adoc b/downstream/assemblies/platform/assembly-aap-activate.adoc index f2131b15c5..9e9624362e 100644 --- a/downstream/assemblies/platform/assembly-aap-activate.adoc +++ b/downstream/assemblies/platform/assembly-aap-activate.adoc @@ -15,4 +15,4 @@ include::platform/proc-aap-activate-with-credentials.adoc[leveloffset=+1] include::platform/proc-aap-activate-with-manifest.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] -ifndef::parent-context[:!context:] +ifndef::parent-context[:!context:] \ No newline at end of file diff --git a/downstream/assemblies/platform/assembly-aap-architecture.adoc b/downstream/assemblies/platform/assembly-aap-architecture.adoc index eda9268de0..8ef518f6ec 100644 --- a/downstream/assemblies/platform/assembly-aap-architecture.adoc +++ b/downstream/assemblies/platform/assembly-aap-architecture.adoc @@ -2,6 +2,12 @@ [id='aap_architecture'] = {PlatformName} Architecture -As a modular platform, {PlatformNameShort} provides the flexibility to easily integrate components and customize your deployment to best meet your automation requirements. The following section provides a comprehensive architectural example of an {PlatformNameShort} deployment. +Deploy all components of {PlatformNameShort} so that all features and capabilities are available for use without the need to take further action. + +Red Hat tests the installation of {PlatformNameShort} {PlatformVers} based on a defined set of infrastructure topologies or reference architectures. Enterprise organizations can use one of the enterprise topologies for production deployments to ensure the highest level of uptime, performance, and continued scalability. Organizations or deployments that are resource-constrained can use a "growth" topology. + +The following section provides a comprehensive architectural example of an {PlatformNameShort} deployment. include::platform/con-aap-example-architecture.adoc[leveloffset=+1] +include::platform/ref-example-CONT-architecture.adoc[leveloffset=+1] +include::platform/ref-example-OCP-architecture.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/assemblies/platform/assembly-aap-backup.adoc b/downstream/assemblies/platform/assembly-aap-backup.adoc index 36b77815cb..4004b28dae 100644 --- a/downstream/assemblies/platform/assembly-aap-backup.adoc +++ b/downstream/assemblies/platform/assembly-aap-backup.adoc @@ -9,13 +9,18 @@ ifdef::context[:parent-context: {context}] [role="_abstract"] -Backing up your {PlatformName} deployment involves creating backup resources for your deployed {HubName} and {ControllerName} instances. Use these procedures to create backup resources for your {PlatformName} deployment. +Backing up your {PlatformName} deployment involves creating backup resources for your deployed instances. +Use the following procedures to create backup resources for your {PlatformName} deployment. +Red Hat recommends taking backups before upgrading the {OperatorPlatformNameShort}.
+Take a backup regularly in case you want to restore the platform to a previous state. + -//part of 2.5 release, (AAP-22178) uncomment when publishing [gmurray] include::platform/proc-aap-platform-gateway-backup.adoc[leveloffset=+1] include::platform/proc-aap-controller-backup.adoc[leveloffset=+1] +include::platform/proc-aap-controller-yaml-backup.adoc[leveloffset=+1] + include::platform/proc-aap-hub-backup.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] diff --git a/downstream/assemblies/platform/assembly-aap-containerized-installation.adoc b/downstream/assemblies/platform/assembly-aap-containerized-installation.adoc index bb08181afc..975f415f23 100644 --- a/downstream/assemblies/platform/assembly-aap-containerized-installation.adoc +++ b/downstream/assemblies/platform/assembly-aap-containerized-installation.adoc @@ -15,45 +15,72 @@ endif::[] :context: aap-containerized-installation [role="_abstract"] -Ansible Automation Platform is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments. +{PlatformNameShort} is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments. -This guide helps you to understand the installation requirements and processes behind our new containerized version of Ansible Automation Platform. This initial version is based upon {PlatformNameShort} 2.4 and is being released as a Technical Preview. Please see link:https://access.redhat.com/support/offerings/techpreview[Technology Preview Features Support Scope] to understand what a technical preview entails. +This guide helps you to understand the installation requirements and processes behind the containerized version of {PlatformNameShort}. +[NOTE] +==== +include::snippets/container-upgrades.adoc[] +==== .Prerequisites +* A host running {RHEL} (RHEL) 9.2 or later. Use a minimal operating system base install. +* A non-root user for the {RHEL} host, with sudo or other Ansible-supported privilege escalation (sudo recommended). This user is responsible for the installation of containerized {PlatformNameShort}. +* SSH public key authentication for the non-root user. For guidelines on setting up SSH public key authentication for the non-root user, see link:https://access.redhat.com/solutions/4110681[How to configure SSH public key authentication for passwordless login]. +** SSH keys are only required when installing on remote hosts. If you are doing a self-contained, local, VM-based installation, you can use `ansible_connection=local`. +* Internet access from the {RHEL} host if you are using the default online installation method. +* The appropriate network ports are open if a firewall is in place. For more information about the ports to open, see link:{URLTopologies}/container-topologies[Container topologies] in _{TitleTopologies}_. -* A RHEL 9.2 based host. Minimal OS base install is recommended. -* A non-root user for the RHEL host, with sudo or other Ansible supported privilege escalation (sudo recommended). This user is responsible for the installation of containerized {PlatformNameShort}. -* It is recommended setting up an *SSH public key authentication* for the non-root user. For guidelines on setting up an SSH public key authentication for the non-root user, see link:https://access.redhat.com/solutions/4110681[How to configure SSH public key authentication for passwordless login].
-* SSH keys are only required when installing on remote hosts. If doing a self contained local VM based installation, you can use *ansible_connection: local* as per the example which does not require SSH. -* Internet access from the RHEL host if using the default online installation method. +== Tested deployment topologies -== System Requirements -Your system must meet the following minimum system requirements to install and run Red Hat Containerized Ansible Automation Platform. +Red Hat tests {PlatformNameShort} {PlatformVers} with a defined set of topologies to give you opinionated deployment options. The supported topologies include infrastructure topology diagrams, tested system configurations, example inventory files, and network ports information. -[cols=2] -|====================== -| Memory | 16Gb RAM -| CPU | 4 CPU -| Disk space | 40Gb -| Disk IOPs | 1500 -|====================== +For containerized {PlatformNameShort}, there are two infrastructure topology shapes: +. Growth - (All-in-one) Intended for organizations that are getting started with {PlatformNameShort}. This topology allows for smaller footprint deployments. +. Enterprise - Intended for organizations that require {PlatformNameShort} deployments to have redundancy or higher compute for large volumes of automation. This is a more future-proofed, scaled-out architecture. +For more information about the tested deployment topologies for containerized {PlatformNameShort}, see link:{URLTopologies}/container-topologies[Container topologies] in _{TitleTopologies}_. + +== System requirements +Each virtual machine (VM) has the following system requirements: + +[cols=2,options="header"] +|==== +| Requirement | Minimum requirement +| RAM | 16 GB +| CPUs | 4 +| Local disk | 60 GB +| Disk IOPS | 3000 +|==== + +If you perform a bundled installation of the growth topology with `hub_seed_collections=true`, 32 GB of RAM is recommended. Note that with this configuration the installation time increases, and seeding the collections alone can take 45 minutes or more. + +=== PostgreSQL requirements +{PlatformName} {PlatformVers} uses {PostgresVers} and requires the external (customer-supported) databases to have ICU support.
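+
+One way to verify ICU support on an external database is to query the collation catalog, because ICU-provided collations are only present when the server was built with ICU. This is a quick check, assuming you can connect with `psql`; the host and user are placeholder values:
+
+----
+$ psql -h db.example.com -U postgres -c "SELECT count(*) FROM pg_collation WHERE collprovider = 'i';"
+----
+
+If the count is `0`, the server does not have ICU support.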
include::platform/proc-preparing-the-rhel-host-for-containerized-installation.adoc[leveloffset=+1] -include::platform/proc-installing-ansible-core.adoc[leveloffset=+1] include::platform/proc-downloading-containerized-aap.adoc[leveloffset=+1] -include::platform/proc-using-postinstall.adoc[leveloffset=+1] +include::platform/ref-configuring-inventory-file.adoc[leveloffset=+1] +include::platform/proc-setup-postgresql-ext-database-containerized.adoc[leveloffset=+2] +include::platform/proc-set-registry-username-password.adoc[leveloffset=+2] +include::platform/ref-using-custom-tls-certificates.adoc[leveloffset=+2] +include::platform/ref-using-custom-receptor-signing-keys.adoc[leveloffset=+2] +include::platform/ref-enabling-automation-hub-collection-and-container-signing.adoc[leveloffset=+2] +include::platform/ref-adding-execution-nodes.adoc[leveloffset=+2] +include::platform/proc-add-eda-safe-plugin-var.adoc[leveloffset=+2] + include::platform/proc-installing-containerized-aap.adoc[leveloffset=+1] +//include::platform/proc-using-postinstall.adoc[leveloffset=+1] include::platform/ref-accessing-control-auto-hub-eda-control.adoc[leveloffset=+1] -include::platform/ref-using-custom-tls-certificates.adoc[leveloffset=+1] -include::platform/ref-using-custom-receptor-signing-keys.adoc[leveloffset=+1] -include::platform/ref-enabling-automation-hub-collection-and-container-signing.adoc[leveloffset=+1] -include::platform/ref-adding-execution-nodes.adoc[leveloffset=+1] +include::platform/proc-update-aap-container.adoc[leveloffset=+1] +include::platform/proc-backup-aap-container.adoc[leveloffset=+1] +include::platform/proc-restore-aap-container.adoc[leveloffset=+1] include::platform/proc-uninstalling-containerized-aap.adoc[leveloffset=+1] +include::platform/proc-reinstalling-containerized-aap.adoc[leveloffset=+1] ifdef::parent-context-of-aap-containerized-installation[:context: {parent-context-of-aap-containerized-installation}] diff --git a/downstream/assemblies/platform/assembly-aap-manifest-files.adoc b/downstream/assemblies/platform/assembly-aap-manifest-files.adoc index 543c98cce6..c61f15c668 100644 --- a/downstream/assemblies/platform/assembly-aap-manifest-files.adoc +++ b/downstream/assemblies/platform/assembly-aap-manifest-files.adoc @@ -1,9 +1,7 @@ - +// emurtoug removed this file from the planning guide to avoid duplication of subscription content within Access management and authentication ifdef::context[:parent-context: {context}] - - [id="assembly-aap-obtain-manifest-files"] = Obtaining a manifest file @@ -21,4 +19,4 @@ include::platform/proc-aap-add-merge-subscriptions.adoc[leveloffset=+1] include::platform/proc-aap-generate-manifest-file.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] -ifndef::parent-context[:!context:] +ifndef::parent-context[:!context:] \ No newline at end of file diff --git a/downstream/assemblies/platform/assembly-aap-migration.adoc b/downstream/assemblies/platform/assembly-aap-migration.adoc index d9d825f85b..448df660bb 100644 --- a/downstream/assemblies/platform/assembly-aap-migration.adoc +++ b/downstream/assemblies/platform/assembly-aap-migration.adoc @@ -2,18 +2,24 @@ ifdef::context[:parent-context: {context}] [id="aap-migration"] -= Migrating {PlatformName} to {OperatorPlatform} += Migrating {PlatformName} to {OperatorPlatformName} :context: aap-migration [role="_abstract"] -Migrating your {PlatformName} deployment to the {OperatorPlatform} allows you to take advantage of the benefits provided by a Kubernetes native operator, including simplified
upgrades and full lifecycle support for your {PlatformName} deployments. +Migrating your {PlatformName} deployment to the {OperatorPlatformNameShort} allows you to take advantage of the benefits provided by a Kubernetes native operator, including simplified upgrades and full lifecycle support for your {PlatformName} deployments. -Use these procedures to migrate any of the following deployments to the {OperatorPlatform}: +[NOTE] +==== +Upgrades of {EDAName} version 2.4 to 2.5 are not supported. Database migrations between {EDAName} 2.4 and {EDAName} 2.5 are not compatible. +==== -* A VM-based installation of Ansible Tower 3.8.6, {ControllerName}, or {HubName} -* An Openshift instance of Ansible Tower 3.8.6 ({PlatformNameShort} 1.2) +Use these procedures to migrate any of the following deployments to the {OperatorPlatformNameShort}: + +* OpenShift cluster A to OpenShift cluster B +* OpenShift namespace A to OpenShift namespace B +* Virtual machine (VM) based or containerized VM {PlatformNameShort} 2.5 → {PlatformNameShort} 2.5 include::platform/con-aap-migration-considerations.adoc[leveloffset=+1] include::platform/con-aap-migration-prepare.adoc[leveloffset=+1] @@ -24,6 +30,7 @@ include::platform/proc-verify-network-connectivity.adoc[leveloffset=+2] include::platform/proc-aap-migration.adoc[leveloffset=+1] include::platform/proc-aap-create_controller.adoc[leveloffset=+2] include::platform/proc-aap-create_hub.adoc[leveloffset=+2] +include::platform/proc-aap-create_eda.adoc[leveloffset=+2] include::platform/proc-post-migration-cleanup.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-aap-platform-components.adoc b/downstream/assemblies/platform/assembly-aap-platform-components.adoc index addd8902ff..ccec368caa 100644 --- a/downstream/assemblies/platform/assembly-aap-platform-components.adoc +++ b/downstream/assemblies/platform/assembly-aap-platform-components.adoc @@ -8,7 +8,19 @@ ifdef::context[:parent-context: {context}] [role="_abstract"] -{PlatformNameShort} is a modular platform composed of separate components that can be connected together to meet your deployment needs. {PlatformNameShort} deployments start with {ControllerName} which is the enterprise framework for controlling, securing, and managing Ansible automation with a user interface (UI) and RESTful application programming interface (API). Then, you can add to your deployment any combination of the following automation platform components: +{PlatformNameShort} is composed of services that are connected together to meet your automation needs. These services provide the ability to store, make decisions for, and execute automation. All of these functions are available through a user interface (UI) and RESTful application programming interface (API). 
Deploy each of the following components so that all features and capabilities are available for use without the need to take further action: + +* {GatewayStart} +* {HubNameStart} +* {PrivateHubNameStart} +* High availability {HubName} +* {EDAcontroller} +* {AutomationMeshStart} +* {ExecEnvNameStart} +* {Galaxy} +* {NavigatorStart} + +include::platform/con-about-platform-gateway.adoc[leveloffset=+1] include::platform/con-about-automation-hub.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-aap-post-upgrade.adoc b/downstream/assemblies/platform/assembly-aap-post-upgrade.adoc new file mode 100644 index 0000000000..a693612790 --- /dev/null +++ b/downstream/assemblies/platform/assembly-aap-post-upgrade.adoc @@ -0,0 +1,30 @@ +ifdef::context[:parent-context: {context}] + +[id="aap-post-upgrade"] += {PlatformNameShort} post-upgrade steps + +:context: aap-post-upgrade + +[role="_abstract"] + +After a successful upgrade to {PlatformNameShort} 2.5, the next crucial step is migrating your users to the latest version of the platform. + +User data and legacy authentication settings from {ControllerName} and {PrivateHubName} are carried over during the upgrade process and allow seamless initial access to the platform after upgrade. Customers can log in without additional action. + +However, to fully transition authentication to use all of the features and capabilities of the 2.5 {Gateway}, a manual process is required post-upgrade to leverage the new authentication framework. In the context of upgrading to {PlatformNameShort} 2.5, this manual process is referred to as _migration_. + +There are important notes and considerations for each type of user migration, including the following: + +* Admin users +* Normal users +* SAML users + +Be sure to read through the important notes highlighted for each user type to help make the migration process as smooth as possible. + +include::platform/proc-aap-migrate-admin-users.adoc[leveloffset=+1] +include::platform/con-aap-migrate-normal-users.adoc[leveloffset=+1] +include::platform/proc-account-linking.adoc[leveloffset=+2] +include::platform/proc-aap-migrate-SAML-users.adoc[leveloffset=+1] + +ifdef::parent-context[:context: {parent-context}] +ifndef::parent-context[:!context:] diff --git a/downstream/assemblies/platform/assembly-aap-recovery.adoc b/downstream/assemblies/platform/assembly-aap-recovery.adoc index 6b87e7237e..9592a596d0 100644 --- a/downstream/assemblies/platform/assembly-aap-recovery.adoc +++ b/downstream/assemblies/platform/assembly-aap-recovery.adoc @@ -7,12 +7,14 @@ ifdef::context[:parent-context: {context}] :context: aap-recovery [role="_abstract"] -If you lose information on your system or issues with an upgrade, you can use the backup resources of your deployment instances. Use these procedures to recover your {ControllerName} and {HubName} deployment files. -//part of 2.5 release, (AAP-22178) uncomment when publishing [gmurray] -//include::platform/proc-aap-platform-gateway-restore.adoc[leveloffset=+1] +If you lose information on your system or experience issues with an upgrade, you can use the backup resources of your deployment instances. Use the following procedures to recover your {PlatformNameShort} deployment files. 
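+
+As an illustration of the general pattern, each restore procedure on OpenShift is driven by a restore custom resource that references an existing backup object. The following sketch assumes an {ControllerName} deployment named `controller` and a backup named `backup1`, both placeholder values; verify the exact fields against the custom resources shipped with your installed operator version:
+
+----
+apiVersion: automationcontroller.ansible.com/v1beta1
+kind: AutomationControllerRestore
+metadata:
+  name: restore1
+spec:
+  deployment_name: controller
+  backup_name: backup1
+----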
+ +include::platform/proc-aap-platform-gateway-restore.adoc[leveloffset=+1] include::platform/proc-aap-controller-restore.adoc[leveloffset=+1] +include::platform/proc-aap-controller-yaml-restore.adoc[leveloffset=+1] + include::platform/proc-aap-hub-restore.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] diff --git a/downstream/assemblies/platform/assembly-aap-upgrades.adoc b/downstream/assemblies/platform/assembly-aap-upgrades.adoc index 9b0678bcdb..2303ff7c74 100644 --- a/downstream/assemblies/platform/assembly-aap-upgrades.adoc +++ b/downstream/assemblies/platform/assembly-aap-upgrades.adoc @@ -8,12 +8,9 @@ ifdef::context[:parent-context: {context}] [role="_abstract"] -Upgrade to {PlatformName} {PlatformVers} by setting up your inventory and running the installation script. -Ansible then upgrades your deployment to {PlatformVers}. -If you plan to upgrade from {PlatformNameShort} 2.0 or earlier, you must migrate Ansible content for compatibility with {PlatformVers}. include::platform/con-aap-upgrades.adoc[leveloffset=+1] -include::platform/con-aap-upgrades-legacy.adoc[leveloffset=+1] +// include::platform/con-aap-upgrades-legacy.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] diff --git a/downstream/assemblies/platform/assembly-aap-upgrading-platform.adoc b/downstream/assemblies/platform/assembly-aap-upgrading-platform.adoc index ac1f9fe009..15e755b1db 100644 --- a/downstream/assemblies/platform/assembly-aap-upgrading-platform.adoc +++ b/downstream/assemblies/platform/assembly-aap-upgrading-platform.adoc @@ -7,14 +7,27 @@ ifdef::context[:parent-context: {context}] [role="_abstract"] -To upgrade your {PlatformName}, start by reviewing planning information to ensure a successful upgrade. +To upgrade your {PlatformName}, start by reviewing link:{LinkPlanningGuide} to ensure a successful upgrade. You can then download the desired version of the {PlatformNameShort} installer, configure the inventory file in the installation bundle to reflect your environment, and then run the installer. +== Prerequisites + +Upgrades to {PlatformNameShort} 2.5 include the link:{URLPlanningGuide}/ref-aap-components#con-about-platform-gateway_planning[{Gateway}]. Ensure you review the link:{URLPlanningGuide}/ref-network-ports-protocols_planning[2.5 Network ports and protocols] for architectural changes and link:{LinkTopologies} for information on opinionated deployment models. + +Ensure that you have reviewed the link:{URLPlanningGuide}/ha-redis_planning#gw-centralized-redis_planning[centralized Redis] instance offered by {PlatformNameShort} for both standalone and clustered topologies.
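+
+After the inventory file reflects your environment, you run the upgrade from the unpacked installer directory. A typical invocation looks like the following sketch; the directory name is a placeholder that depends on the installer version you downloaded:
+
+----
+$ cd ansible-automation-platform-setup-bundle-<version>
+$ ./setup.sh -i inventory
+----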
+ include::platform/con-aap-upgrade-planning.adoc[leveloffset=+1] include::platform/proc-choosing-obtaining-installer.adoc[leveloffset=+1] include::platform/proc-editing-inventory-file-for-updates.adoc[leveloffset=+1] +include::platform/con-backup-aap.adoc[leveloffset=+1] include::platform/proc-running-setup-script-for-updates.adoc[leveloffset=+1] +include::platform/proc-upgrade-controller-hub-eda-unified-ui.adoc[leveloffset=+1] +// [ddacosta] - Moved to a new post upgrade section of the doc +//include::platform/proc-account-linking.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] ifndef::parent-context[:!context:] + \ No newline at end of file diff --git a/downstream/assemblies/platform/assembly-ag-controller-backup-and-restore.adoc b/downstream/assemblies/platform/assembly-ag-controller-backup-and-restore.adoc index c0e8b0ea55..c81379ba50 100644 --- a/downstream/assemblies/platform/assembly-ag-controller-backup-and-restore.adoc +++ b/downstream/assemblies/platform/assembly-ag-controller-backup-and-restore.adoc @@ -13,7 +13,7 @@ However, you must use the most recent minor version of a release to backup or re For example, if the current {PlatformNameShort} version you are on is 2.0.x, use only the latest 2.0 installer. Backup and restore only works on PostgreSQL versions supported by your current platform version. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/red_hat_ansible_automation_platform_installation_guide/index#red_hat_ansible_automation_platform_system_requirements[{PlatformName} system requirements] in the _{PlatformName} Installation Guide_. +For more information, see link:{URLPlanningGuide}/platform-system-requirements[System requirements] in the _{TitlePlanningGuide}_. ==== The {PlatformNameShort} setup playbook is invoked as `setup.sh` from the path where you unpacked the platform installer tarball. diff --git a/downstream/assemblies/platform/assembly-ag-controller-clustering.adoc b/downstream/assemblies/platform/assembly-ag-controller-clustering.adoc index 3dd44525c9..05efb34783 100644 --- a/downstream/assemblies/platform/assembly-ag-controller-clustering.adoc +++ b/downstream/assemblies/platform/assembly-ag-controller-clustering.adoc @@ -13,11 +13,11 @@ Load balancing is optional, and it is entirely possible to have ingress on one o Each instance must be able to join the {ControllerName} cluster and expand its ability to run jobs. This is a simple system where jobs can run anywhere rather than be directed on where to run. -Also, you can group clustered instances into different pools or queues, called xref:controller-instance-groups[Instance groups]. +Also, you can group clustered instances into different pools or queues, called link:{URLControllerUserGuide}/controller-instance-groups[Instance groups] as described in _{ControllerUG}_. {PlatformNameShort} supports container-based clusters by using Kubernetes, meaning you can install new {ControllerName} instances on this platform without any variation or diversion in functionality. You can create instance groups to point to a Kubernetes container. -For more information, see the xref:controller-instance-and-container-groups[Container and instance groups] section. +For more information, see the link:{URLControllerUserGuide}/controller-instance-and-container-groups[Instance and container groups] section in _{ControllerUG}_. 
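+
+For context, each container group is backed by a pod specification that you can customize. The following sketch resembles the default pod specification documented for container groups; the namespace and image are placeholder values:
+
+----
+apiVersion: v1
+kind: Pod
+metadata:
+  namespace: ansible-automation-platform
+spec:
+  serviceAccountName: default
+  automountServiceAccountToken: false
+  containers:
+    - image: quay.io/ansible/awx-ee:latest
+      name: worker
+      args:
+        - ansible-runner
+        - worker
+        - --private-data-dir=/runner
+----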
.Supported operating systems diff --git a/downstream/assemblies/platform/assembly-ag-controller-config.adoc b/downstream/assemblies/platform/assembly-ag-controller-config.adoc index f825566196..aa14e7e401 100644 --- a/downstream/assemblies/platform/assembly-ag-controller-config.adoc +++ b/downstream/assemblies/platform/assembly-ag-controller-config.adoc @@ -2,23 +2,24 @@ = {ControllerNameStart} configuration -You can configure some {ControllerName} options using the *Settings* menu of the User Interface. +You can configure some {ControllerName} options by using the *Settings* menu of the User Interface. -//Each tab contains fields with a *Reset* option, enabling you to revert any value entered back to the default value. -//*Reset All* enables you to revert all the values to their factory default values. +*Save* applies the changes you make, but it does not exit the edit dialog. -//*Save* applies the changes you make, but it does not exit the edit dialog. To return to the *Settings* page, from the navigation panel select {MenuAEAdminSettings} or use the breadcrumbs at the top of the current view. //Now a separate option covered by Donna //include::platform/proc-controller-authentication.adoc[leveloffset=+1] -include::platform/proc-controller-configure-jobs.adoc[leveloffset=+1] +//[ddacosta] subscription content moved to access management guide +//include::platform/proc-controller-configure-subscriptions.adoc[leveloffset=+1] include::platform/proc-controller-configure-system.adoc[leveloffset=+1] +include::platform/proc-controller-configure-jobs.adoc[leveloffset=+1] +include::platform/ref-controller-logging-settings.adoc[leveloffset=+1] //The only directly controller related thing here is the custom logo which is covered separately //include::platform/proc-controller-configure-user-interface.adoc[leveloffset=+1] -//This doesn't exisat in the documented form //include::platform/proc-controller-configure-usability-analytics.adoc[leveloffset=+2] -include::platform/con-controller-custom-logos.adoc[leveloffset=+1] +//include::platform/con-controller-custom-logos.adoc[leveloffset=+1] +include::platform/proc-controller-configure-analytics.adoc[leveloffset=+1] include::platform/con-controller-additional-settings.adoc[leveloffset=+1] //This should be in Hala's documentation //include::platform/proc-controller-obtaining-subscriptions.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-ag-controller-troubleshooting.adoc b/downstream/assemblies/platform/assembly-ag-controller-troubleshooting.adoc index 92a1208f6f..eacda3ed35 100644 --- a/downstream/assemblies/platform/assembly-ag-controller-troubleshooting.adoc +++ b/downstream/assemblies/platform/assembly-ag-controller-troubleshooting.adoc @@ -4,9 +4,9 @@ Useful troubleshooting information for {ControllerName}. 
-include::platform/ref-controller-connect-to-host.adoc[leveloffset=+1] +//include::platform/ref-controller-connect-to-host.adoc[leveloffset=+1] include::platform/ref-controller-unable-to-login-http.adoc[leveloffset=+1] -include::platform/ref-controller-run-a-playbook.adoc[leveloffset=+1] +//include::platform/ref-controller-run-a-playbook.adoc[leveloffset=+1] include::platform/ref-controller-unable-to-run-job.adoc[leveloffset=+1] include::platform/ref-controller-playbooks-not-showing.adoc[leveloffset=+1] include::platform/ref-controller-playbook-pending.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-ag-controller-usability-analytics.adoc b/downstream/assemblies/platform/assembly-ag-controller-usability-analytics.adoc index e87b501840..8b5bc3d6d0 100644 --- a/downstream/assemblies/platform/assembly-ag-controller-usability-analytics.adoc +++ b/downstream/assemblies/platform/assembly-ag-controller-usability-analytics.adoc @@ -11,8 +11,9 @@ Only users installing a trial of or a fresh installation of are opted-in for thi //You can opt out or control the way {ControllerName} collects data by setting your participation level in the *User Interface settings* in the {MenuAEAdminSettings} menu. //Should Settings menu be a link? +For information on setting up {Analytics}, see xref:proc-controller-configure-analytics[Configuring {Analytics}]. -include::platform/proc-controller-control-data-collection.adoc[leveloffset=+1] +//include::platform/proc-controller-control-data-collection.adoc[leveloffset=+1] include::platform/ref-controller-automation-analytics.adoc[leveloffset=+1] include::platform/ref-controller-use-by-organization.adoc[leveloffset=+2] include::platform/ref-controller-jobs-run-by-organization.adoc[leveloffset=+2] diff --git a/downstream/assemblies/platform/assembly-ag-instance-and-container-groups.adoc b/downstream/assemblies/platform/assembly-ag-instance-and-container-groups.adoc index 41bff27321..1f8e158308 100644 --- a/downstream/assemblies/platform/assembly-ag-instance-and-container-groups.adoc +++ b/downstream/assemblies/platform/assembly-ag-instance-and-container-groups.adoc @@ -7,7 +7,7 @@ This is called a container group. You can execute jobs in a container group only as-needed per playbook. For more information, see xref:controller-container-groups[Container groups]. -For {ExecEnvShort}s, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#assembly-controller-execution-environments[Execution environments] in the _{ControllerUG}_. +For {ExecEnvShort}s, see xref:assembly-controller-execution-environments[Execution environments]. include::platform/con-controller-instance-groups.adoc[leveloffset=+1] include::platform/ref-controller-group-policies-automationcontroller.adoc[leveloffset=+2] diff --git a/downstream/assemblies/platform/assembly-appendix-inventory-file-vars.adoc b/downstream/assemblies/platform/assembly-appendix-inventory-file-vars.adoc index 70b64619a1..fdb70c98fd 100644 --- a/downstream/assemblies/platform/assembly-appendix-inventory-file-vars.adoc +++ b/downstream/assemblies/platform/assembly-appendix-inventory-file-vars.adoc @@ -1,15 +1,19 @@ [id="appendix-inventory-files-vars"] = Inventory file variables -The following tables contain information about the pre-defined variables used in Ansible installation inventory files. -Not all of these variables are required. 
+The following tables contain information about the variables used in {PlatformNameShort}'s installation `inventory` files. The tables include the variables that you can use for RPM-based installation and {ContainerBase}. -include::platform/ref-general-inventory-variables.adoc[leveloffset=+1] +include::platform/ref-ansible-inventory-variables.adoc[leveloffset=+1] include::platform/ref-hub-variables.adoc[leveloffset=+1] -// SSO variables moved into hub-variables. -//include::platform/ref-sso-variables.adoc[leveloffset=+1] -// Catalog removed for 2.4 -//include::platform/ref-catalog-variables.adoc[leveloffset=+1] include::platform/ref-controller-variables.adoc[leveloffset=+1] -include::platform/ref-ansible-inventory-variables.adoc[leveloffset=+1] +include::platform/ref-database-inventory-variables.adoc[leveloffset=+1] include::platform/ref-eda-controller-variables.adoc[leveloffset=+1] +include::platform/ref-general-inventory-variables.adoc[leveloffset=+1] +include::platform/ref-images-inventory-variables.adoc[leveloffset=+1] +include::platform/ref-gateway-variables.adoc[leveloffset=+1] +include::platform/ref-receptor-inventory-variables.adoc[leveloffset=+1] + +// SSO variables moved into hub-variables. +//include::platform/ref-sso-variables.adoc[leveloffset=+1] +// Catalog removed for 2.4 +//include::platform/ref-catalog-variables.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/assemblies/platform/assembly-appendix-operator-crs.adoc b/downstream/assemblies/platform/assembly-appendix-operator-crs.adoc new file mode 100644 index 0000000000..bc4b915290 --- /dev/null +++ b/downstream/assemblies/platform/assembly-appendix-operator-crs.adoc @@ -0,0 +1,15 @@ + +ifdef::context[:parent-context: {context}] + +[id="appendix-operator-crs_{context}"] + += Appendix: {PlatformName} custom resources + +[role="_abstract"] + +This appendix provides a reference for the {PlatformNameShort} custom resources for various deployment scenarios. + +include::platform/ref-operator-crs.adoc[leveloffset=+1] + +ifdef::parent-context[:context: {parent-context}] +ifndef::parent-context[:!context:] \ No newline at end of file diff --git a/downstream/assemblies/platform/assembly-appendix-troubleshoot-containerized-aap.adoc b/downstream/assemblies/platform/assembly-appendix-troubleshoot-containerized-aap.adoc new file mode 100644 index 0000000000..b21a2ea5bb --- /dev/null +++ b/downstream/assemblies/platform/assembly-appendix-troubleshoot-containerized-aap.adoc @@ -0,0 +1,10 @@ +[id="appendix-troubleshoot-containerized-aap"] += Troubleshooting containerized {PlatformNameShort} + +Use this information to troubleshoot your containerized {PlatformNameShort} installation. 
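+
+Because containerized {PlatformNameShort} services run as containers under the installation user, generic Podman commands are a useful first diagnostic step. This is a minimal sketch; container names vary by deployment:
+
+----
+# List all containers and their current status.
+$ podman ps --all --format "{{.Names}} {{.Status}}"
+
+# Inspect the most recent log lines of a specific container.
+$ podman logs --tail 50 <container_name>
+----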
+ +include::platform/ref-containerized-troubleshoot-diagnosing.adoc[leveloffset=+1] +include::platform/ref-containerized-troubleshoot-install.adoc[leveloffset=+1] +include::platform/ref-containerized-troubleshoot-config.adoc[leveloffset=+1] +include::platform/ref-containerized-troubleshoot-ref.adoc[leveloffset=+1] + diff --git a/downstream/assemblies/platform/assembly-automation-mesh-operator-aap.adoc b/downstream/assemblies/platform/assembly-automation-mesh-operator-aap.adoc index 189e4da62a..11a48e4336 100644 --- a/downstream/assemblies/platform/assembly-automation-mesh-operator-aap.adoc +++ b/downstream/assemblies/platform/assembly-automation-mesh-operator-aap.adoc @@ -2,17 +2,17 @@ = {AutomationMeshStart} for operator-based {PlatformName} -Scaling your automation mesh is available on OpenShift deployments of {PlatformName} and is possible through adding or removing nodes from your cluster dynamically, using the *Instances* resource of the {ControllerName} UI, without running the installation script. +On OpenShift deployments of {PlatformName}, you can scale your automation mesh by dynamically adding or removing nodes from your cluster, using the *Instances* resource of the {PlatformNameShort} UI, without running the installation script. Instances serve as nodes in your mesh topology. {AutomationMeshStart} enables you to extend the footprint of your automation. The location where you launch a job can be different from the location where the ansible-playbook runs. -To manage instances from the {ControllerName} UI, you must have System Administrator or System Auditor permissions. +To manage instances from the {PlatformNameShort} UI, you must have System Administrator or System Auditor permissions. In general, the more processor cores (CPU) and memory (RAM) a node has, the more jobs that can be scheduled to run on that node at once. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-jobs#controller-capacity-determination[Automation controller capacity determination and job impact]. +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/using_automation_execution/controller-jobs#controller-capacity-determination[Automation controller capacity determination and job impact].
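+
+As a rough illustration of that relationship, the linked capacity documentation derives a fork capacity from both CPU and memory. The following arithmetic is only a sketch that assumes the documented default multipliers (4 forks per core, 100 MB of memory per fork); confirm the current formulas in the linked documentation:
+
+----
+# Example node: 4 CPU cores and 16 GB (16384 MB) of memory
+cpu_capacity = 4 cores x 4 forks per core         = 16 forks
+mem_capacity = (16384 MB - 2048 MB) / 100 MB/fork = ~143 forks
+----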
include::platform/ref-operator-mesh-prerequisites.adoc[leveloffset=+1] include::platform/proc-set-up-virtual-machines.adoc[leveloffset=+1] @@ -23,6 +23,7 @@ include::platform/proc-run-jobs-on-execution-nodes.adoc[leveloffset=+1] include::platform/proc-connecting-nodes-through-mesh-ingress.adoc[leveloffset=+1] include::platform/proc-pulling-the-secret.adoc[leveloffset=+1] include::platform/ref-removing-instances.adoc[leveloffset=+1] +include::platform/proc-operator-mesh-upgrading-receptors.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-configure-aap-operator.adoc b/downstream/assemblies/platform/assembly-configure-aap-operator.adoc index e843928e5a..15eab83857 100644 --- a/downstream/assemblies/platform/assembly-configure-aap-operator.adoc +++ b/downstream/assemblies/platform/assembly-configure-aap-operator.adoc @@ -4,24 +4,31 @@ ifdef::context[:parent-context: {context}] :context: configure-aap-operator -= Configuring the {OperatorPlatform} on {OCP} += Configuring the {OperatorPlatformName} on {OCP} -The platform gateway for {PlaformNameShort} enables you to manage the following {PlatformNameShort} components to form a single user interface: +The {Gateway} for {PlatformNameShort} enables you to manage the following {PlatformNameShort} components to form a single user interface: * {ControllerNameStart} * {HubNameStart} * {EDAName} * {LightspeedShortName} (This feature is disabled by default, you must opt in to use it.) -Before you can deploy the platform gateway you need to have {OperatorPlatform} installed in a namespace. -If you have not installed {OperatorPlatform} see <>. +Before you can deploy the {Gateway}, you must have {OperatorPlatformNameShort} installed in a namespace. +If you have not installed {OperatorPlatformNameShort}, see xref:install-aap-operator_operator-platform-doc[Installing the {OperatorPlatformName} on {OCP}]. -If you have the {OperatorPlatform} and some or all of the {PlatformNameShort} components installed see <> for how to proceed. +[NOTE] +==== +{GatewayStart} is only available under {OperatorPlatformNameShort} version 2.5. Every component deployed under {OperatorPlatformNameShort} 2.5 defaults to version 2.5. +==== + +If you have the {OperatorPlatformNameShort} and some or all of the {PlatformNameShort} components installed, see xref:operator-deploy-central-config_{context}[Deploying the {Gateway} with existing {PlatformNameShort} components] for how to proceed. include::platform/proc-operator-link-components.adoc[leveloffset=+1] include::platform/proc-operator-access-aap.adoc[leveloffset=+1] include::platform/proc-operator-deploy-central-config.adoc[leveloffset=+1] -include::platform/proc-operator-aap-troubleshooting.adoc[leveloffset=+1] +include::platform/proc-operator-external-db-gateway.adoc[leveloffset=+1] +include::platform/proc-operator-enable-https-redirect.adoc[leveloffset=+1] +include::platform/proc-operator-aap-faq.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] ifndef::parent-context[:!context:] \ No newline at end of file diff --git a/downstream/assemblies/platform/assembly-controller-activity-stream.adoc b/downstream/assemblies/platform/assembly-controller-activity-stream.adoc new file mode 100644 index 0000000000..2d86bfabc8 --- /dev/null +++ b/downstream/assemblies/platform/assembly-controller-activity-stream.adoc @@ -0,0 +1,22 @@ +[id="assembly-controller-activity-stream"] + += Activity stream + +* From the navigation panel, select {MenuAEAdminActivityStream}.
++ +image::activity_stream_page.png[Activity stream page] + An Activity Stream shows all changes for a particular object. +For each change, the Activity Stream shows the time of the event, the user that initiated the event, and the action. +The information displayed varies depending on the type of event. + +* Click the image:examine.png[Examine,15,15] icon to display the event log for the change. ++ +image::activity_stream_details.png[Activity stream details] + +You can filter the Activity Stream by the initiating user, by system (if it was system initiated), or by any related object, such as a credential, job template, or schedule. +The main Activity Stream page shows the activity stream for the entire instance. +Most pages permit viewing an activity stream filtered for that specific object. + +You can view the activity stream on any page by clicking the btn:[Activity Stream] image:activitystream.png[activitystream,15,15] icon. + diff --git a/downstream/assemblies/platform/assembly-controller-applications.adoc b/downstream/assemblies/platform/assembly-controller-applications.adoc index b21c272465..181bc806c1 100644 --- a/downstream/assemblies/platform/assembly-controller-applications.adoc +++ b/downstream/assemblies/platform/assembly-controller-applications.adoc @@ -3,7 +3,12 @@ = Applications Create and configure token-based authentication for external applications such as ServiceNow and Jenkins. -With token-based authentication, external applications can easily integrate with {ControllerName}. +With token-based authentication, external applications can easily integrate with {PlatformNameShort}. + +[IMPORTANT] +==== +{ControllerNameStart} OAuth applications on the platform UI are not supported for 2.4 to 2.5 migration. See link:https://access.redhat.com/solutions/7091987[this Knowledgebase article] for more information. +==== With OAuth 2 you can use tokens to share data with an application without disclosing login information. You can configure these tokens as read-only. @@ -11,9 +16,12 @@ You can create an application that is representative of the external application you are integrating with, then use it to create tokens for the application to use on behalf of its users. Associating these tokens with an application resource enables you to manage all tokens issued for a particular application. -By separating the issue of tokens under *OAuth Applications*, you can revoke all tokens based on the Application without having to revoke all tokens in the system. +By separating token issuance under *OAuth Applications*, you can revoke all tokens based on the application without having to revoke all tokens in the system.
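+
+After a token is issued, the external application presents it as a standard OAuth 2 bearer token on each request. The following sketch shows a token-authenticated API call; the hostname and endpoint path are placeholder values, so adjust them for your deployment:
+
+----
+$ curl -H "Authorization: Bearer <oauth_token>" \
+    https://platform.example.com/api/controller/v2/me/
+----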
include::platform/ref-controller-applications-getting-started.adoc[leveloffset=+1] +include::platform/ref-gw-access-rules-apps-tokens.adoc[leveloffset=+2] +include::platform/ref-gw-application-functions.adoc[leveloffset=+2] +include::platform/ref-gw-request-token-after-expiration.adoc[leveloffset=+3] include::platform/proc-controller-create-application.adoc[leveloffset=+1] //include::platform/ref-controller-apps-add-tokens.adoc[leveloffset=+2] -include::platform/proc-controller-apps-create-tokens.adoc[leveloffset=+2] + diff --git a/downstream/assemblies/platform/assembly-controller-best-practices.adoc b/downstream/assemblies/platform/assembly-controller-best-practices.adoc index 18810f439b..c1c6cba17c 100644 --- a/downstream/assemblies/platform/assembly-controller-best-practices.adoc +++ b/downstream/assemblies/platform/assembly-controller-best-practices.adoc @@ -20,7 +20,7 @@ For more information, see the link https://docs.ansible.com/ansible/latest/tips_ [NOTE] ==== * Avoid using the playbooks `vars_prompt` feature, as {ControllerName} does not interactively permit `vars_prompt` questions. -If you cannot avoid using `vars_prompt`, see the xref:controller-surveys-in-job-templates[Surveys] functionality. +If you cannot avoid using `vars_prompt`, see the xref:controller-surveys-in-job-templates[Surveys in job templates] functionality. * Avoid using the playbooks `pause` feature without a timeout, as {ControllerName} does not permit canceling a pause interactively. If you cannot avoid using `pause`, you must set a timeout. @@ -52,7 +52,7 @@ Use the "callback" feature to permit newly booting instances to request configur == Larger Host Counts Set "forks" on a job template to larger values to increase parallelism of execution runs. -For more information on tuning Ansible, see link:https://www.ansible.com/blog/ansible-performance-tuning[the Ansible blog]. +//For more information about tuning Ansible, see link:https://www.ansible.com/blog/ansible-performance-tuning[the Ansible blog]. == Continuous integration / Continuous Deployment diff --git a/downstream/assemblies/platform/assembly-controller-credentials.adoc b/downstream/assemblies/platform/assembly-controller-credentials.adoc index 514a24f829..3d54e54b97 100644 --- a/downstream/assemblies/platform/assembly-controller-credentials.adoc +++ b/downstream/assemblies/platform/assembly-controller-credentials.adoc @@ -1,16 +1,16 @@ [id="controller-credentials"] -ifdef::controller-GS[] -= Managing credentials +//ifdef::controller-GS[] +//= Managing credentials -Credentials authenticate the controller user to launch Ansible playbooks. The passwords and SSH keys are used to authenticate against inventory hosts. -By using the credentials feature of {ControllerName}, you can require the {ControllerName} user to enter a password or key phrase when a playbook launches. +//Credentials authenticate the controller user to launch Ansible playbooks. The passwords and SSH keys are used to authenticate against inventory hosts. +//By using the credentials feature of {ControllerName}, you can require the {ControllerName} user to enter a password or key phrase when a playbook launches. 
-include::platform/proc-controller-create-credential.adoc[leveloffset=+1] -include::platform/proc-controller-edit-credential.adoc[leveloffset=+1] -endif::controller-GS[] -ifdef::controller-UG[] +//include::platform/proc-controller-create-credential.adoc[leveloffset=+1] +//include::platform/proc-controller-edit-credential.adoc[leveloffset=+1] +//endif::controller-GS[] +//ifdef::controller-UG[] = Managing user credentials @@ -22,7 +22,7 @@ If a user moves to a different team or leaves the organization, you do not have [NOTE] ==== {ControllerNameStart} encrypts passwords and key information in the database and never makes secret information visible through the API. -For further information, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#doc-wrapper[_{ControllerAG}_]. +For further information, see link:{URLControllerAdminGuide}[_{ControllerAG}_]. ==== == How credentials work @@ -31,12 +31,31 @@ To pass the key from {ControllerName} to SSH, the key must be decrypted before i {ControllerNameStart} uses that pipe to send the key to SSH, so that the key is never written to disk. If passwords are used, {ControllerName} handles them by responding directly to the password prompt and decrypting the password before writing it to the prompt. +[NOTE] +==== +It is possible to create duplicate credentials with the same name and without an organization. +However, it is not possible to create two duplicate credentials in the same organization. + +.Example + +. Create two machine credentials with the same name but without an organization. +. Use the module `ansible.controller.export` to export the credentials. +. Use the module `ansible.controller.import` in a different automation execution node. +. Check the imported credentials. + +When you export two duplicate credentials and then import them in a different node, only one credential is imported. 
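+
+A minimal playbook sketch of these steps follows; it assumes that connection details for the source and target nodes are supplied through the standard `CONTROLLER_HOST` and related environment variables or module options:
+
+----
+- name: Copy credentials between nodes (sketch)
+  hosts: localhost
+  gather_facts: false
+  tasks:
+    # Run against the source node.
+    - name: Export all credentials
+      ansible.controller.export:
+        credentials: 'all'
+      register: export_output
+
+    # Run against the target node.
+    - name: Import the exported credentials
+      ansible.controller.import:
+        assets: "{{ export_output.assets }}"
+----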
+==== + //Removed as part of editorial review - include::platform/ref-controller-credentials-getting-started.adoc[leveloffset=+1] include::platform/proc-controller-create-credential.adoc[leveloffset=+1] include::platform/proc-controller-add-users-job-templates.adoc[leveloffset=+1] include::platform/ref-controller-credential-types.adoc[leveloffset=+1] include::platform/ref-controller-credential-aws.adoc[leveloffset=+2] include::platform/ref-controller-credential-galaxy-hub.adoc[leveloffset=+2] +//AWS Secrets Manager Lookup +include::platform/ref-controller-aws-secrets-lookup.adoc[leveloffset=+2] +//Bitbucket +include::platform/ref-controller-credential-bitbucket.adoc[leveloffset=+2] include::platform/ref-controller-credential-centrify-vault.adoc[leveloffset=+2] include::platform/ref-controller-credential-container-registry.adoc[leveloffset=+2] include::platform/ref-controller-credential-cyberark-central.adoc[leveloffset=+2] @@ -67,5 +86,5 @@ include::platform/ref-controller-credential-vault.adoc[leveloffset=+2] include::platform/ref-controller-credential-vmware-vcenter.adoc[leveloffset=+2] include::platform/ref-controller-use-credentials-in-playbooks.adoc[leveloffset=+1] -endif::controller-UG[] +//endif::controller-UG[] diff --git a/downstream/assemblies/platform/assembly-controller-glossary.adoc b/downstream/assemblies/platform/assembly-controller-glossary.adoc index 20e99ecef1..b5b41c869a 100644 --- a/downstream/assemblies/platform/assembly-controller-glossary.adoc +++ b/downstream/assemblies/platform/assembly-controller-glossary.adoc @@ -35,7 +35,7 @@ These pods are provisioned on-demand and exist only for the duration of the play === Credentials Authentication details that can be used by {ControllerName} to launch jobs against machines, to synchronize with inventory sources, and to import project content from a version control system. -For more information, see xref:controller-credentials[Credentials]. +For more information, see [Credentials]. [discrete] === Credential Plugin diff --git a/downstream/assemblies/platform/assembly-controller-hosts.adoc b/downstream/assemblies/platform/assembly-controller-hosts.adoc new file mode 100644 index 0000000000..81f49ea70c --- /dev/null +++ b/downstream/assemblies/platform/assembly-controller-hosts.adoc @@ -0,0 +1,16 @@ +[id="assembly-controller-hosts"] + += Hosts + +A host is a system managed by {PlatformNameShort}, which may include a physical, virtual, cloud-based server, or other device. + +Typically a host is an operating system instance. + +Hosts are grouped in inventories and are sometimes referred to as a “nodes”. + +Ansible works against multiple managed nodes or “hosts” in your infrastructure at the same time, using a list or group of lists known as an inventory. + +Once your inventory is defined, use patterns to select the hosts or groups you want Ansible to run against. 
+
+include::platform/proc-controller-create-host.adoc[leveloffset=+1]
+include::platform/proc-controller-view-host.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/downstream/assemblies/platform/assembly-controller-instances.adoc b/downstream/assemblies/platform/assembly-controller-instances.adoc
index be6a9a22df..b86d368086 100644
--- a/downstream/assemblies/platform/assembly-controller-instances.adoc
+++ b/downstream/assemblies/platform/assembly-controller-instances.adoc
@@ -2,17 +2,17 @@

= Managing capacity with Instances

-Scaling your {AutomationMesh} is available on OpenShift deployments of {PlatformName} and is possible through adding or removing nodes from your cluster dynamically, using the *Instances* resource of the {ControllerName} UI, without running the installation script.
+Scaling your {AutomationMesh} is available on OpenShift deployments of {PlatformName} and is possible by adding or removing nodes from your cluster dynamically, using the *Instances* resource of the UI, without running the installation script.

Instances serve as nodes in your mesh topology.
{AutomationMeshStart} enables you to extend the footprint of your automation.
The location where you launch a job can be different from the location where `ansible-playbook` runs.

-To manage instances from the {ControllerName} UI, you must have System Administrator or System Auditor permissions.
+To manage instances from the UI, you must have System Administrator or System Auditor permissions.

In general, the more processor cores (CPU) and memory (RAM) a node has, the more jobs can be scheduled to run on that node at once.

-For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-jobs#controller-capacity-determination[Automation controller capacity determination and job impact].
+For more information, see xref:controller-capacity-determination[Automation controller capacity determination and job impact].

//include::platform/ref-instances-prerequisites.adoc[leveloffset=+1]
include::platform/ref-operator-mesh-prerequisites.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-controller-inventories.adoc b/downstream/assemblies/platform/assembly-controller-inventories.adoc
index 82e672448f..988dc307b9 100644
--- a/downstream/assemblies/platform/assembly-controller-inventories.adoc
+++ b/downstream/assemblies/platform/assembly-controller-inventories.adoc
@@ -7,21 +7,21 @@ ifdef::context[:parent-context: {context}]

= Inventories

-ifdef::controller-GS[]
-An inventory is a collection of hosts managed by {ControllerName}.
-Organizations are assigned to inventories, while permissions to launch playbooks against inventories are controlled at the user or team level.
+//ifdef::controller-GS[]
+//An inventory is a collection of hosts managed by {ControllerName}.
+//Organizations are assigned to inventories, while permissions to launch playbooks against inventories are controlled at the user or team level.
-For more information, see the following documentation:
+//For more information, see the following documentation:

-* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#proc-controller-user-permissions[Adding and removing user permissions]
-* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#proc-controller-team-add-user[Adding or removing a user]
-* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/red_hat_ansible_automation_platform_planning_guide/index#about_the_installer_inventory_file[About the installer inventory file]
+//* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#proc-controller-user-permissions[Adding and removing user permissions]
+//* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#proc-controller-team-add-user[Adding or removing a user]
+//* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/red_hat_ansible_automation_platform_planning_guide/index#about_the_installer_inventory_file[About the installer inventory file]

-include::platform/proc-controller-create-inventory.adoc[leveloffset=+1]
-include::platform/con-controller-groups-hosts.adoc[leveloffset=+1]
-include::platform/proc-controller-add-groups-hosts.adoc[leveloffset=+2]
-endif::controller-GS[]
-ifdef::controller-UG[]
+//include::platform/proc-controller-create-inventory.adoc[leveloffset=+1]
+//include::platform/con-controller-groups-hosts.adoc[leveloffset=+1]
+//include::platform/proc-controller-add-groups-hosts.adoc[leveloffset=+2]
+//endif::controller-GS[]
+//ifdef::controller-UG[]

{PlatformName} works against a list of managed nodes or hosts in your infrastructure that are logically organized, using an inventory file.
You can use the {PlatformName} installer inventory file to specify your installation scenario and describe host deployments to Ansible.
@@ -29,13 +29,13 @@ By using an inventory file, Ansible can manage a large number of hosts with a single command.
Inventories also help you use Ansible more efficiently by reducing the number of command line options you have to specify.

Inventories are divided into groups, and these groups contain the hosts.
-Groups may be sourced manually, by entering host names into {ControllerName}, or from one of its supported cloud providers.
+Groups can be sourced manually, by entering host names into {ControllerName}, or from one of its supported cloud providers.

[NOTE]
====
If you have a custom dynamic inventory script, or a cloud provider that is not yet supported natively in {ControllerName}, you can also import that into {ControllerName}.

-For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#assembly-inventory-file-importing[Inventory file importing] in the _{ControllerAG}_.
+For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/configuring_automation_execution/index#assembly-inventory-file-importing[Inventory file importing] in _{ControllerAG}_.
====

From the navigation panel, select {MenuInfrastructureInventories}.
@@ -69,8 +69,8 @@ Click the Inventory name to display the *Details* page for the selected inventory, including the inventory's groups and hosts.

//Smart inventories are deprecated.
-//include::platform/ref-controller-smart-inventories.adoc[leveloffset=+1]
-//include::platform/ref-controller-smart-host-filter.adoc[leveloffset=+2]
+include::platform/ref-controller-smart-inventories.adoc[leveloffset=+1]
+include::platform/ref-controller-smart-host-filter.adoc[leveloffset=+2]
//include::platform/proc-controller-define-filter-with-facts.adoc[leveloffset=+2]

include::platform/ref-controller-constructed-inventories.adoc[leveloffset=+1]
@@ -106,13 +106,14 @@ include::platform/proc-controller-inv-source-rh-virt.adoc[leveloffset=+4]
include::platform/proc-controller-inv-source-aap.adoc[leveloffset=+4]
//The following Terraform module is for 2.5 only:
include::platform/proc-controller-inv-source-terraform.adoc[leveloffset=+4]
+include::platform/proc-controller-inv-source-open-shift-virt.adoc[leveloffset=+4]
include::platform/ref-controller-export-old-scripts.adoc[leveloffset=+3]

include::platform/ref-controller-view-completed-jobs.adoc[leveloffset=+1]
include::platform/proc-controller-run-ad-hoc-commands.adoc[leveloffset=+1]

-endif::controller-UG[]
+//endif::controller-UG[]

ifdef::parent-context[:context: {parent-context}]
ifndef::parent-context[:!context:]
diff --git a/downstream/assemblies/platform/assembly-controller-job-templates.adoc b/downstream/assemblies/platform/assembly-controller-job-templates.adoc
index 93c0c91346..f5b30964ac 100644
--- a/downstream/assemblies/platform/assembly-controller-job-templates.adoc
+++ b/downstream/assemblies/platform/assembly-controller-job-templates.adoc
@@ -4,8 +4,9 @@ A job template combines an Ansible playbook from a project and the settings required to launch it.
Job templates are useful to run the same job many times.
Job templates also encourage the reuse of Ansible playbook content and collaboration between teams.
-For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-job-templates[Job Templates] in the _{ControllerUG}_.
+For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-job-templates[Job Templates] in _{ControllerUG}_.

include::platform/proc-controller-getting-started-with-job-templates.adoc[leveloffset=+1]
+include::platform/proc-controller-create-job-template.adoc[leveloffset=+1]
include::platform/proc-controller-edit-job-template.adoc[leveloffset=+1]
include::platform/proc-controller-run-job-template.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-controller-logging-aggregation.adoc b/downstream/assemblies/platform/assembly-controller-logging-aggregation.adoc
index 82fce29567..07289439ab 100644
--- a/downstream/assemblies/platform/assembly-controller-logging-aggregation.adoc
+++ b/downstream/assemblies/platform/assembly-controller-logging-aggregation.adoc
@@ -20,8 +20,23 @@ If you already use `rsyslog` for logging system logs on the {ControllerName} instances
Use the `/api/v2/settings/logging/` endpoint to configure how the {ControllerName} `rsyslog` process handles messages that have not yet been sent in the event that your external logger goes offline:

-* `LOG_AGGREGATOR_MAX_DISK_USAGE_GB`: Specifies the amount of data to store (in gigabytes) during an outage of the external log aggregator (defaults to 1).
-Equivalent to the `rsyslogd queue.maxdiskspace` setting.
+* `LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB`: Maximum disk persistence for rsyslogd action queuing in GB.
++
+Specifies the amount of data to store (in gigabytes) during an outage of the external log aggregator (defaults to 1).
++
+Equivalent to the `rsyslogd queue.maxDiskSpace` setting.
+
+* `LOG_AGGREGATOR_ACTION_QUEUE_SIZE`: Maximum number of messages that can be stored in the log action queue.
++
+Defines how large the rsyslog action queue can grow in number of messages stored.
+This can have an impact on memory use.
+When the queue reaches 75% of this number, the queue starts writing to disk (`queue.highWatermark` in `rsyslog`).
+When it reaches 90%, `NOTICE`, `INFO`, and `DEBUG` messages start to be discarded (`queue.discardMark` with `queue.discardSeverity=5`).
++
+Equivalent to the `rsyslogd queue.size` setting on the action.
+
+The queue stores its files in the directory specified by `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH`.
+
* `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH`: Specifies the location to store logs that should be retried after an outage of the external log aggregator (defaults to `/var/lib/awx`).
Equivalent to the `rsyslogd queue.spoolDirectory` setting.
diff --git a/downstream/assemblies/platform/assembly-controller-management-jobs.adoc b/downstream/assemblies/platform/assembly-controller-management-jobs.adoc
index a5dabcf030..ff9260d457 100644
--- a/downstream/assemblies/platform/assembly-controller-management-jobs.adoc
+++ b/downstream/assemblies/platform/assembly-controller-management-jobs.adoc
@@ -12,7 +12,8 @@ image:management-jobs.png[Management jobs]
The following job types are available for you to schedule and launch:

* *Cleanup Activity Stream*: Remove activity stream history older than a specified number of days
-* *Cleanup Expired OAuth 2 Tokens*: Remove expired OAuth 2 access tokens and refresh tokens
+// [emcwhinn] Removing as part of AAP-37805
+// * *Cleanup Expired OAuth 2 Tokens*: Remove expired OAuth 2 access tokens and refresh tokens
* *Cleanup Expired Sessions*: Remove expired browser sessions from the database
* *Cleanup Job Details*: Remove job history older than a specified number of days
diff --git a/downstream/assemblies/platform/assembly-controller-organizations.adoc b/downstream/assemblies/platform/assembly-controller-organizations.adoc
index 779971e06e..744c4a541e 100644
--- a/downstream/assemblies/platform/assembly-controller-organizations.adoc
+++ b/downstream/assemblies/platform/assembly-controller-organizations.adoc
@@ -1,37 +1,40 @@
-[id="assembly-controller-organizations"]
+ifdef::context[:parent-context: {context}]

-ifdef::controller-GS[]
-= Managing organizations in {ControllerName}
+:_mod-docs-content-type: ASSEMBLY

-An organization is a logical collection of users, teams, projects, and inventories.
-It is the highest level object in the controller object hierarchy.
-After you have created an organization, {ControllerName} displays the organization details.
-You can then manage access and execution environments for the organization.
+[id="assembly-controller-organizations_{context}"]

-image::controller-tower-hierarchy.png[Hierarchy]
+= Organizations
+
+:context: access-mgmt-orgs
+
+An organization is a logical collection of users, teams, and resources. It is the highest-level object in the {PlatformNameShort} object hierarchy. After you have created an organization, {PlatformNameShort} displays the organization details. You can then manage access and execution environments for the organization.
+{PlatformNameShort} automatically creates a default organization, and the system administrator is automatically assigned to this organization.
If you have a Self-support level license, you have only the default organization available and must not delete it.
+
+// [ddacosta] Removed this statement because I think it was relevant when this content was upstream but in the downstream docs, it’s implied that you have a license.
+//[NOTE]
+//====
+//Only Enterprise or Premium licenses can add new organizations.
+//====
+//Enterprise and Premium license users who want to add a new organization should refer to the xref:proc-controller-create-organization[Creating an organization].

include::platform/proc-controller-review-organizations.adoc[leveloffset=+1]
-include::platform/proc-controller-edit-an-organization.adoc[leveloffset=+1]
-endif::controller-GS[]
-ifdef::controller-UG[]
-= Organizations
-An organization is a logical collection of users, teams, projects, and inventories.
-It is the highest level object in the controller object hierarchy.
+include::platform/proc-controller-create-organization.adoc[leveloffset=+1]

-image::controller-tower-hierarchy.png[Hierarchy]
+include::platform/con-controller-access-organizations.adoc[leveloffset=+1]

-From the navigation menu, select btn:[Organizations] to display the existing organizations for your installation.
+include::platform/proc-controller-add-organization-user.adoc[leveloffset=+2]

-image:organizations-home-showing-example-organization.png[Organizations]
+include::platform/proc-gw-add-admin-organization.adoc[leveloffset=+2]

-Organizations can be searched by *Name* or *Description*.
+include::platform/proc-gw-add-team-organization.adoc[leveloffset=+2]

-Modify organizations using the image:leftpencil.png[Edit,15,15] icon.
-Click btn:[Delete] to remove a selected organization.
+include::platform/proc-gw-delete-organization.adoc[leveloffset=+2]

-include::platform/proc-controller-create-organization.adoc[leveloffset=+1]
-include::platform/con-controller-access-organizations.adoc[leveloffset=+1]
+include::platform/ref-controller-organization-notifications.adoc[leveloffset=+1]

-endif::controller-UG[]
+include::platform/proc-gw-organizations-exec-env.adoc[leveloffset=+1]
+ifdef::parent-context[:context: {parent-context}]
+ifndef::parent-context[:!context:]
diff --git a/downstream/assemblies/platform/assembly-controller-secret-management.adoc b/downstream/assemblies/platform/assembly-controller-secret-management.adoc
index e10ff5408c..1cac3be928 100644
--- a/downstream/assemblies/platform/assembly-controller-secret-management.adoc
+++ b/downstream/assemblies/platform/assembly-controller-secret-management.adoc
@@ -23,7 +23,7 @@ These external secret values are fetched before running a playbook that needs them.

.Additional resources

-For more information about specifying secret management system credentials in the user interface, see xref:controller-credentials[Managing user credentials].
+For more information about specifying secret management system credentials in the user interface, see link:{URLControllerUserGuide}/index#controller-credentials[Managing user credentials].
include::platform/proc-controller-configure-secret-lookups.adoc[leveloffset=+1]
include::platform/ref-controller-metadata-credential-input.adoc[leveloffset=+2]
diff --git a/downstream/assemblies/platform/assembly-controller-security.adoc b/downstream/assemblies/platform/assembly-controller-security.adoc
index 8580a9127d..4794e615ee 100644
--- a/downstream/assemblies/platform/assembly-controller-security.adoc
+++ b/downstream/assemblies/platform/assembly-controller-security.adoc
@@ -11,8 +11,10 @@ This protection ensures that jobs can only access playbooks, roles, and data from the project directory for that job template run.
For credential security, you can upload locked SSH keys and set the unlock password to "ask".
You can also have the system prompt you for SSH credentials or sudo passwords rather than having the system store them in the database.

+//Moved to jobs
include::platform/con-controller-playbook-access-info-sharing.adoc[leveloffset=+1]
include::platform/ref-controller-isolation-functionality.adoc[leveloffset=+2]
+
include::platform/con-controller-rbac.adoc[leveloffset=+1]
include::platform/con-controller-role-hierarchy.adoc[leveloffset=+2]
include::platform/ref-controller-applying-rbac.adoc[leveloffset=+3]
@@ -25,6 +27,5 @@ include::platform/ref-controller-rbac-user-view.adoc[leveloffset=+4]
include::platform/ref-controller-rbac-roles.adoc[leveloffset=+3]
include::platform/ref-controller-rbac-built-in-roles.adoc[leveloffset=+4]
include::platform/ref-controller-rbac-personas.adoc[leveloffset=+3]
-
include::platform/con-controller-function-of-roles.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-controller-teams.adoc b/downstream/assemblies/platform/assembly-controller-teams.adoc
index 4cd9299a90..2d7657b1e6 100644
--- a/downstream/assemblies/platform/assembly-controller-teams.adoc
+++ b/downstream/assemblies/platform/assembly-controller-teams.adoc
@@ -1,24 +1,32 @@
ifdef::context[:parent-context: {context}]

-[id="assembly-controller-teams"]
+:_mod-docs-content-type: ASSEMBLY

-:context: controller-teams
-= Managing teams
+[id="assembly-controller-teams_{context}"]

-A *Team* is a subdivision of an organization with associated users, projects, credentials, and permissions.
-Teams offer a means to implement role-based access control schemes and delegate responsibilities across organizations.
-For example, you can grant permissions to a whole team rather than to each user on the team.
+= Teams

-From the navigation panel, select {MenuControllerTeams}.
+:context: controller-teams

-image:organizations-teams-list.png[Teams list]
+A team is a subdivision of an organization with associated users and resources. Teams provide a means to implement role-based access control schemes and to delegate responsibilities across organizations. For instance, you can grant permissions to a team rather than to each user on the team.

-You can sort and search the team list and searched by *Name* or *Organization*.
+You can create as many teams as you need for your organization. Teams can be assigned to only one organization, while an organization can be made up of multiple teams. Each team can be assigned roles, in the same way that roles are assigned to users. Teams also provide a scalable way to assign ownership of credentials, which prevents you from having to click through the interface repeatedly to assign the same credentials to the same user.

-Click the Edit image:leftpencil.png[Edit,15,15] icon next to the entry to edit information about the team.
-You can also review *Users* and *Permissions* associated with this team.
+include::platform/proc-gw-team-list-view.adoc[leveloffset=+1] include::platform/proc-controller-creating-a-team.adoc[leveloffset=+1] +include::platform/proc-gw-team-add-user.adoc[leveloffset=+1] + +include::platform/proc-gw-team-remove-user.adoc[leveloffset=+1] + +include::platform/proc-gw-add-admin-team.adoc[leveloffset=+1] + +include::platform/proc-controller-user-permissions.adoc[leveloffset=+1] + +include::platform/proc-gw-remove-roles-team.adoc[leveloffset=+1] + +include::platform/proc-gw-delete-team.adoc[leveloffset=+1] + ifdef::parent-context[:context: {parent-context}] -ifndef::parent-context[:!context:] +ifndef::parent-context[:!context:] \ No newline at end of file diff --git a/downstream/assemblies/platform/assembly-controller-topology-viewer.adoc b/downstream/assemblies/platform/assembly-controller-topology-viewer.adoc index a63eb1b86b..37ad60f013 100644 --- a/downstream/assemblies/platform/assembly-controller-topology-viewer.adoc +++ b/downstream/assemblies/platform/assembly-controller-topology-viewer.adoc @@ -1,13 +1,13 @@ [id="assembly-controller-topology-viewer"] -= Topology viewer += Topology View -Use the topology viewer to view node type, node health, and specific details about each node if you already have a mesh topology deployed. +Use the *Topology View* to view node type, node health, and specific details about each node if you already have a mesh topology deployed. To access the topology viewer from the {ControllerName} UI, you must have *System Administrator* permissions. -For more information about {AutomationMesh} on a VM-based installation, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_automation_mesh_guide_for_vm-based_installations/index[{PlatformName} {AutomationMesh} guide for VM-based installations]. +For more information about {AutomationMesh} on a VM-based installation, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_mesh_for_vm_environments/index[{AutomationMeshStart} for VM environments]. -For more information about {AutomationMesh} on an operator-based installation, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_automation_mesh_for_operator-based_installations/index[{PlatformName} {AutomationMesh} for operator-based installations]. +For more information about {AutomationMesh} on an operator-based installation, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_mesh_for_managed_cloud_or_operator_environments/index[{AutomationMeshStart} for managed cloud or operator environments]. include::platform/proc-controller-access-topology-viewer.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-controller-user-interface.adoc b/downstream/assemblies/platform/assembly-controller-user-interface.adoc index dd572e02e1..b56c933827 100644 --- a/downstream/assemblies/platform/assembly-controller-user-interface.adoc +++ b/downstream/assemblies/platform/assembly-controller-user-interface.adoc @@ -6,31 +6,26 @@ ifdef::context[:parent-context: {context}] = The User Interface -The {ControllerName} _User Interface_ (UI) provides a graphical framework for your IT orchestration requirements. -The navigation panel provides quick access to {ControllerName} resources, such as *Projects*, *Inventories*, *Job Templates*, and *Jobs*. +The {MenuTopAE} _User Interface_ (UI) provides a graphical framework for your IT orchestration requirements. 
-[NOTE]
-====
-The {ControllerName} UI is also available as a technical preview and is subject to change in future releases.
-To preview the new UI, click the *Enable Preview of New User Interface* toggle to *On* from the *Miscellaneous System* option of the *Settings* menu.
-
-//image:configure-tower-system-misc-preview-newui.png[image]
+Access your user profile or the *About* page, view related documentation, or log out by using the icons in the page header.

-After saving, logout and log back in to access the new UI from the preview banner.
-To return to the current UI, click the link on the top banner where indicated.
+The navigation panel provides quick access to {ControllerName} resources, such as *Jobs*, *Templates*, *Schedules*, *Projects*, *Infrastructure*, and *Administration*.

-//image:ug-dashboard-preview-banner.png[image]
-====
-Access your user profile, the *About* page, view related documentation, or log out using the icons in the page header.
+* xref:controller-jobs[Jobs]
+* xref:controller-job-templates[Job templates]
+* xref:controller-workflow-job-templates[Workflow job templates]
+* xref:controller-schedules[Schedules]
+* xref:controller-projects[Projects]

-You can view the activity stream for that user by clicking the btn:[Activity Stream] image:activitystream.png[activitystream,15,15] icon.
+//You can view the activity stream for that user by clicking the btn:[Activity Stream] image:activitystream.png[activitystream,15,15] icon.

-include::platform/con-controller-views.adoc[leveloffset=+1]
-include::platform/con-controller-resources.adoc[leveloffset=+1]
-include::platform/con-controller-access.adoc[leveloffset=+1]
+//include::platform/con-controller-views.adoc[leveloffset=+1]
+//include::platform/con-controller-resources.adoc[leveloffset=+1]
+//include::platform/con-controller-access.adoc[leveloffset=+1]
+include::platform/con-controller-infrastructure.adoc[leveloffset=+1]
include::platform/con-controller-administration.adoc[leveloffset=+1]
//For next version/tech preview
//include::platform/con-controller-analytics.adoc[leveloffset=+1] not created yet
-//Settings not included in tech preview
include::platform/con-controller-settings.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/downstream/assemblies/platform/assembly-controller-users.adoc b/downstream/assemblies/platform/assembly-controller-users.adoc
index b0e3a56ebd..8214004dd6 100644
--- a/downstream/assemblies/platform/assembly-controller-users.adoc
+++ b/downstream/assemblies/platform/assembly-controller-users.adoc
@@ -1,19 +1,33 @@
-[id="assembly-controller-users"]
+ifdef::context[:parent-context: {context}]

-ifdef::controller-GS[]
-= User roles in {ControllerName}
-endif::controller-GS[]
-ifdef::controller-UG[]
-= Managing Users in {ControllerName}
-endif::controller-UG[]
+[id="assembly-controller-users_{context}"]

+= Users
+
+:context: access-mgmt-users
+
+Users associated with an organization are shown in the *Users* tab of the organization.
+
+You can add other users to an organization, including normal users and system administrators, but you must create the users first.
+
+[NOTE]
+====
+{PlatformNameShort} automatically creates a default admin user who can log in and set up {PlatformNameShort} for the organization. This user cannot be deleted or modified.
+
+====
+You can sort or search the User list by *Username*, *First name*, *Last name*, or *Email*. Click the arrows in the header to toggle your sorting preference.
+You can view *User type* and *Email* beside the user name on the Users page.
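+
+As an illustration only, a user can also be created programmatically rather than through the procedures included below. This sketch assumes that the `ansible.controller` collection is installed and that authentication environment variables (such as `CONTROLLER_HOST`) are set; the user details are placeholders.
+
+[source,yaml]
+----
+- name: Create a normal user
+  ansible.controller.user:
+    username: jdoe                           # placeholder user name
+    password: "{{ vaulted_user_password }}"  # placeholder, store in a vault
+    email: jdoe@example.com                  # placeholder address
+    state: present
+----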
+ +include::platform/proc-gw-users-list-view.adoc[leveloffset=+1] -include::platform/con-controller-create-users.adoc[leveloffset=+1] -ifdef::controller-UG[] include::platform/proc-controller-creating-a-user.adoc[leveloffset=+1] + +include::platform/proc-gw-editing-a-user.adoc[leveloffset=+1] + include::platform/proc-controller-deleting-a-user.adoc[leveloffset=+1] -include::platform/ref-controller-user-organizations.adoc[leveloffset=+1] -include::platform/ref-controller-user-teams.adoc[leveloffset=+1] + include::platform/ref-controller-user-roles.adoc[leveloffset=+1] -include::platform/proc-controller-user-permissions.adoc[leveloffset=+2] -include::platform/proc-controller-user-tokens.adoc[leveloffset=+1] -endif::controller-UG[] + +include::platform/proc-gw-remove-roles-user.adoc[leveloffset=+1] + +ifdef::parent-context[:context: {parent-context}] +ifndef::parent-context[:!context:] diff --git a/downstream/assemblies/platform/assembly-custom-inventory-scripts.adoc b/downstream/assemblies/platform/assembly-custom-inventory-scripts.adoc index b6a40e541f..fc4a1778bb 100644 --- a/downstream/assemblies/platform/assembly-custom-inventory-scripts.adoc +++ b/downstream/assemblies/platform/assembly-custom-inventory-scripts.adoc @@ -6,11 +6,11 @@ ==== Inventory scripts have been discontinued. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-inventories#ref-controller-export-old-scripts[Export old inventory scripts] in the _{ControllerUG}_. +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-inventories#ref-controller-export-old-scripts[Export old inventory scripts] in _{ControllerUG}_. ==== If you use custom inventory scripts, migrate to sourcing these scripts from a project. -For more information, see xref:assembly-inventory-file-importing[Inventory File Importing], and link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-inventories#ref-controller-inventory-sources[Inventory sources] in the _{ControllerUG}_. +For more information, see xref:assembly-inventory-file-importing[Inventory File Importing], and link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-inventories#ref-controller-inventory-sources[Inventory sources] in _{ControllerUG}_. If you are setting up an inventory file, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/assembly-platform-install-scenario#proc-editing-installer-inventory-file_platform-install-scenario[Editing the Red Hat Ansible Automation Platform installer inventory file] and find examples specific to your setup. 
diff --git a/downstream/assemblies/platform/assembly-deploy-eda-controller-on-aap-operator.adoc b/downstream/assemblies/platform/assembly-deploy-eda-controller-on-aap-operator.adoc
index 78a32c8bfd..add161366b 100644
--- a/downstream/assemblies/platform/assembly-deploy-eda-controller-on-aap-operator.adoc
+++ b/downstream/assemblies/platform/assembly-deploy-eda-controller-on-aap-operator.adoc
@@ -3,14 +3,14 @@ ifdef::context[:parent: {context}]

[id="deploy-eda-controller-on-aap-operator-ocp"]

-= Deploying {EDAcontroller} with {OperatorPlatform} on {OCPShort}
+= Deploying {EDAcontroller} with {OperatorPlatformName} on {OCP}

:context: deploying

[role="_abstract"]
{EDAcontroller} is the interface for event-driven automation and introduces automated resolution of IT requests. This component helps you connect to sources of events and acts on those events using rulebooks. When you deploy {EDAcontroller}, you can automate decision making, use numerous event sources, implement event-driven automation within and across multiple IT use cases, and achieve more efficient service delivery.

-Use the following instructions to install {EDAName} with your {OperatorPlatform} on {OCPShort}.
+Use the following instructions to install {EDAName} with your {OperatorPlatformNameShort} on {OCPShort}.

include::platform/proc-deploy-eda-controller-with-aap-operator-ocp.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-disconnected-installation.adoc b/downstream/assemblies/platform/assembly-disconnected-installation.adoc
index a823e7d6d1..50684757f2 100644
--- a/downstream/assemblies/platform/assembly-disconnected-installation.adoc
+++ b/downstream/assemblies/platform/assembly-disconnected-installation.adoc
@@ -13,11 +13,13 @@ If you are not connected to the internet or do not have access to online repositories
Before installing {PlatformNameShort} on a disconnected network, you must meet the following prerequisites:

-. A created subscription manifest. See link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_operations_guide/assembly-aap-obtain-manifest-files#doc-wrapper[Obtaining a manifest file] for more information.
+* A subscription manifest that you can upload to the platform.
++
+For more information, see link:{URLCentralAuth}/assembly-gateway-licensing#assembly-aap-obtain-manifest-files[Obtaining a manifest file].

-. The {PlatformNameShort} setup bundle at link:{PlatformDownloadUrl}[Customer Portal] is downloaded.
+* The {PlatformNameShort} setup bundle is downloaded from the link:{PlatformDownloadUrl}[Customer Portal].

-. The link:https://docs.ansible.com/ansible/latest/collections/community/general/nsupdate_module.html[DNS records] for the {ControllerName} and {PrivateHubName} servers are created.
+* The link:https://docs.ansible.com/ansible/latest/collections/community/general/nsupdate_module.html[DNS records] for the {ControllerName} and {PrivateHubName} servers are created (see the example task after this list).
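+
+As an illustration only, the linked `nsupdate` module can create such records when your DNS servers accept dynamic updates. Every value in this sketch (server, zone, addresses, and key details) is a placeholder:
+
+[source,yaml]
+----
+- name: Create an A record for the controller
+  community.general.nsupdate:
+    server: 192.0.2.1                        # placeholder DNS server
+    zone: example.com                        # placeholder zone
+    record: controller
+    type: A
+    value: 192.0.2.10                        # placeholder controller address
+    key_name: nsupdate-key                   # placeholder TSIG key name
+    key_secret: "{{ vaulted_key_secret }}"   # placeholder secret
+    key_algorithm: hmac-sha256
+    state: present
+----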
include::platform/con-aap-installation-on-disconnected-rhel.adoc[leveloffset=+1] @@ -28,29 +30,39 @@ include::platform/proc-creating-a-new-web-server-to-host-repositories.adoc[level include::platform/proc-accessing-rpm-repositories-for-locally-mounted-dvd.adoc[leveloffset=+1] -include::platform/proc-adding-a-subscription-manifest-to-aap-without-an-internet-connection.adoc[leveloffset=+1] +//include::platform/proc-adding-a-subscription-manifest-to-aap-without-an-internet-connection.adoc[leveloffset=+1] +//removed for 2.5 changes AAP-30807 made by rjgrange include::platform/proc-installing-the-aap-setup-bundle.adoc[leveloffset=+1] include::platform/proc-completing-post-installation-tasks.adoc[leveloffset=+1] -include::platform/proc-importing-collections-into-private-automation-hub.adoc[leveloffset=+1] +//include::platform/proc-importing-collections-into-private-automation-hub.adoc[leveloffset=+1] +//removed for 2.5 changes AAP-30807 made by rjgrange -include::platform/proc-creating-collection-namespace.adoc[leveloffset=+1] +//include::platform/proc-creating-collection-namespace.adoc[leveloffset=+1] +//removed for 2.5 changes AAP-30807 made by rjgrange -include::platform/proc-approving-the-imported-collection.adoc[leveloffset=+1] +//include::platform/proc-approving-the-imported-collection.adoc[leveloffset=+1] +//removed for 2.5 changes AAP-30807 made by rjgrange -include::platform/con-building-an-execution-environment-in-a-disconnected-environment.adoc[leveloffset=+1] +//include::platform/con-building-an-execution-environment-in-a-disconnected-environment.adoc[leveloffset=+1] +//removed for 2.5 changes AAP-30807 made by rjgrange -include::platform/proc-installing-the-ansible-builder-rpm.adoc[leveloffset=+2] +//include::platform/proc-installing-the-ansible-builder-rpm.adoc[leveloffset=+2] +//removed for 2.5 changes AAP-30807 made by rjgrange -include::platform/proc-creating-the-custom-execution-environment-definition.adoc[leveloffset=+2] +//include::platform/proc-creating-the-custom-execution-environment-definition.adoc[leveloffset=+2] +//removed for 2.5 changes AAP-30807 made by rjgrange -include::platform/proc-building-the-custom-execution-environment.adoc[leveloffset=+2] +//include::platform/proc-building-the-custom-execution-environment.adoc[leveloffset=+2] +//removed for 2.5 changes AAP-30807 made by rjgrange -include::platform/proc-uploading-the-custom-execution-environment-to-the-private-hub.adoc[leveloffset=+2] +//include::platform/proc-uploading-the-custom-execution-environment-to-the-private-hub.adoc[leveloffset=+2] +//removed for 2.5 changes AAP-30807 made by rjgrange -include::platform/proc-upgrading-between-minor-aap-releases.adoc[leveloffset=+1] +// Removing references to upgrades for 2.5-ea - AAP-17771 +// include::platform/proc-upgrading-between-minor-aap-releases.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] ifndef::parent-context[:!context:] diff --git a/downstream/assemblies/platform/assembly-edge-manager-config.adoc b/downstream/assemblies/platform/assembly-edge-manager-config.adoc new file mode 100644 index 0000000000..a316798185 --- /dev/null +++ b/downstream/assemblies/platform/assembly-edge-manager-config.adoc @@ -0,0 +1,10 @@ +[id="assembly-edge-manager-config"] + += Configuring {RedHatEdge} + +How to deploy and configure a {RedHatEdge} service. 
+ +include::platform/ref-edge-manager-field-selectors.adoc[leveloffset=+1] +include::platform/ref-edge-manager-additional-fields.adoc[leveloffset=+2] +include::platform/ref-edge-manager-fields-discovery.adoc[leveloffset=+2] +include::platform/ref-edge-manager-supported-operators.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-edge-manager-install.adoc b/downstream/assemblies/platform/assembly-edge-manager-install.adoc new file mode 100644 index 0000000000..f37b587755 --- /dev/null +++ b/downstream/assemblies/platform/assembly-edge-manager-install.adoc @@ -0,0 +1,6 @@ +[id="assembly-edge-manager-install"] + += Enabling the {RedHatEdge} + +Enable the {RedHatEdge} to manage edge devices and applications at scale. + diff --git a/downstream/assemblies/platform/assembly-edge-manager-manage-devices.adoc b/downstream/assemblies/platform/assembly-edge-manager-manage-devices.adoc new file mode 100644 index 0000000000..ab35c35dd6 --- /dev/null +++ b/downstream/assemblies/platform/assembly-edge-manager-manage-devices.adoc @@ -0,0 +1,17 @@ +[id="assembly-edge-manager-manage-devices"] + += Managing devices + +Manage individual devices to oversee their configurations, deployments, and operational statuses. + +include::platform/proc-edge-manager-manage-apps.adoc[leveloffset=+1] +include::platform/proc-edge-manager-manage-apps-ui.adoc[leveloffset=+2] +include::platform/proc-edge-manager-manage-apps-cli.adoc[leveloffset=+2] +include::platform/proc-edge-manager-create-apps.adoc[leveloffset=+2] +include::platform/ref-edge-manager-device-lifecycle.adoc[leveloffset=+1] +include::platform/ref-edge-manager-monitor-device.adoc[leveloffset=+1] +include::platform/proc-edge-manager-monitor-device-resources-web-ui.adoc[leveloffset=+2] +include::platform/proc-edge-manager-monitor-device-resources-cli.adoc[leveloffset=+2] +include::platform/con-edge-manager-access-devices.adoc[leveloffset=+1] +include::platform/proc-edge-manager-access-devices-web-ui.adoc[leveloffset=+2] +include::platform/proc-edge-manager-access-devices-cli.adoc[leveloffset=+2] diff --git a/downstream/assemblies/platform/assembly-edge-manager-troubleshooting.adoc b/downstream/assemblies/platform/assembly-edge-manager-troubleshooting.adoc new file mode 100644 index 0000000000..0796bf17d4 --- /dev/null +++ b/downstream/assemblies/platform/assembly-edge-manager-troubleshooting.adoc @@ -0,0 +1,9 @@ +[id="assembly-edge-manager-troubleshooting"] + += Troubleshooting {RedHatEdge} + +When working with devices in {RedHatEdge}, you might see issues related to configuration, connectivity, or deployment. +Troubleshooting these issues requires understanding how device configurations are applied, how to check logs, and how to verify communication between the device and the service. 
+
+include::platform/proc-edge-manager-view-device-config.adoc[leveloffset=+1]
+include::platform/proc-edge-manager-generate-device-log.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-gateway-licensing.adoc b/downstream/assemblies/platform/assembly-gateway-licensing.adoc
new file mode 100644
index 0000000000..aa5d755988
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gateway-licensing.adoc
@@ -0,0 +1,21 @@
+ifdef::context[:parent-context: {context}]
+
+[id="assembly-gateway-licensing"]
+= Managing {PlatformNameShort} licensing, updates, and support
+
+:context: licensing-gw
+
+Ansible is an open source software project and is licensed under the GNU General Public License version 3, as described in the link:https://github.com/ansible/ansible/blob/devel/COPYING[Ansible Source Code].
+
+You must have valid subscriptions attached before installing {PlatformNameShort}.
+
+For more information, see xref:proc-attaching-subscriptions[Attaching Subscriptions].
+
+include::platform/ref-controller-trial-evaluation.adoc[leveloffset=+1]
+include::platform/ref-controller-licenses.adoc[leveloffset=+1]
+include::platform/ref-controller-node-counting.adoc[leveloffset=+1]
+include::platform/ref-controller-subscription-types.adoc[leveloffset=+1]
+include::platform/proc-attaching-subscriptions.adoc[leveloffset=+1]
+include::assembly-aap-manifest-files.adoc[leveloffset=+1]
+include::assembly-aap-activate.adoc[leveloffset=+1]
+
\ No newline at end of file
diff --git a/downstream/assemblies/platform/assembly-gs-auto-dev.adoc b/downstream/assemblies/platform/assembly-gs-auto-dev.adoc
new file mode 100644
index 0000000000..d9ae191a8f
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gs-auto-dev.adoc
@@ -0,0 +1,57 @@
+ifdef::context[:parent-context-of-assembly-gs-auto-dev: {context}]
+
+:_mod-docs-content-type: ASSEMBLY
+
+ifndef::context[]
+[id="assembly-gs-auto-dev"]
+endif::[]
+ifdef::context[]
+[id="assembly-gs-auto-dev_{context}"]
+endif::[]
+
+:context: assembly-gs-auto-dev
+
+= Getting started as an automation developer
+
+As an automation developer, you can use {PlatformNameShort} to implement your organization's automation strategy.
+{PlatformNameShort} can help you write, test, and share automation content, and download and use Red Hat certified collections.
+This guide walks you through the basic steps to get set up as an automation developer on {PlatformNameShort}, with information on how to:
+
+* Set up your development environment
+* Create, publish, and use custom automation content
+* Build and use {ExecEnvName} and decision environments
+* Create and run rulebook activations for {EDAName}
+* Create and use job templates
+
+include::platform/con-gs-setting-up-dev-env.adoc[leveloffset=+1]
+include::platform/con-gs-create-automation-content.adoc[leveloffset=+1]
+include::platform/con-gs-define-events-rulebooks.adoc[leveloffset=+1]
+include::platform/con-gs-ansible-roles.adoc[leveloffset=+1]
+include::platform/proc-creating-ansible-role.adoc[leveloffset=+2]
+include::platform/con-gs-learn-about-collections.adoc[leveloffset=+1]
+include::platform/proc-gs-publish-to-a-collection.adoc[leveloffset=+1]
+include::platform/proc-gs-upload-collection.adoc[leveloffset=+2]
+include::platform/con-gs-execution-env.adoc[leveloffset=+1]
+include::platform/proc-gs-use-base-execution-env.adoc[leveloffset=+2]
+include::platform/proc-gs-add-ee-to-job-template.adoc[leveloffset=+2]
+include::platform/con-gs-build-decision-env.adoc[leveloffset=+1]
+include::platform/proc-gs-auto-dev-set-up-decision-env.adoc[leveloffset=+2]
+include::platform/proc-gs-auto-dev-create-automation-execution-proj.adoc[leveloffset=+1]
+include::platform/proc-gs-auto-dev-create-automation-decision-proj.adoc[leveloffset=+1]
+include::platform/con-gs-auto-dev-about-inv.adoc[leveloffset=+1]
+// [hherbly] this repeats module above include::platform/proc-controller-create-inventory.adoc[leveloffset=+2]
+include::platform/con-gs-auto-dev-job-templates.adoc[leveloffset=+1]
+include::platform/proc-controller-getting-started-with-job-templates.adoc[leveloffset=+2]
+include::platform/proc-gs-auto-dev-create-template.adoc[leveloffset=+2]
+// [hherbly] incomplete module? include::platform/proc-gs-auto-dev-run-template.adoc[leveloffset=+2]
+include::platform/proc-controller-edit-job-template.adoc[leveloffset=+2]
+include::platform/con-gs-rulebook-activations.adoc[leveloffset=+1]
+include::platform/proc-gs-eda-set-up-rulebook-activation.adoc[leveloffset=+2]
+include::eda/con-eda-rulebook-activation-list-view.adoc[leveloffset=+3]
+include::eda/proc-eda-enable-rulebook-activations.adoc[leveloffset=+2]
+include::eda/proc-eda-restart-rulebook-activations.adoc[leveloffset=+2]
+include::eda/proc-eda-delete-rulebook-activations.adoc[leveloffset=+2]
+include::eda/proc-eda-activate-webhook.adoc[leveloffset=+2]
+
+ifdef::parent-context-of-assembly-gs-auto-dev[:context: {parent-context-of-assembly-gs-auto-dev}]
+ifndef::parent-context-of-assembly-gs-auto-dev[:!context:]
diff --git a/downstream/assemblies/platform/assembly-gs-auto-op.adoc b/downstream/assemblies/platform/assembly-gs-auto-op.adoc
new file mode 100644
index 0000000000..55e70c1981
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gs-auto-op.adoc
@@ -0,0 +1,55 @@
+ifdef::context[:parent-context-of-assembly-gs-auto-op: {context}]
+
+:_mod-docs-content-type: ASSEMBLY
+
+ifndef::context[]
+[id="assembly-gs-auto-op"]
+endif::[]
+ifdef::context[]
+[id="assembly-gs-auto-op_{context}"]
+endif::[]
+
+:context: assembly-gs-auto-op
+
+= Getting started as an automation operator
+
+As an automation operator, you can use {PlatformNameShort} to organize and manage automation projects by using Red Hat certified collections or custom content for your organization.
+
+To get started as an automation operator, see the following sections:
+
+* xref:con-gs-playbooks[Get started with playbooks]
+* xref:proc-gs-publish-to-a-collection_{context}[Publishing to a collection in a source code manager]
+* xref:proc-gs-auto-op-projects[Automation execution projects]
+* xref:con-gs-execution-env_{context}[Build and use an execution environment]
+* xref:con-gs-auto-op-job-templates[Job templates]
+* xref:con-gs-auto-op-about-inv[About inventories]
+* xref:con-gs-automation-execution-jobs[Automation execution jobs]
+
+include::platform/con-gs-playbooks.adoc[leveloffset=+1]
+include::platform/proc-gs-write-playbook.adoc[leveloffset=+1]
+include::platform/con-gs-ansible-roles.adoc[leveloffset=+1]
+include::platform/proc-creating-ansible-role.adoc[leveloffset=+2]
+include::platform/con-gs-ansible-content.adoc[leveloffset=+1]
+include::platform/con-gs-learn-about-collections.adoc[leveloffset=+2]
+// [hherbly] removing because it repeats modules above include::platform/proc-gs-browse-content.adoc[leveloffset=+2]
+include::platform/proc-gs-downloading-content.adoc[leveloffset=+2]
+//
+include::platform/proc-gs-publish-to-a-collection.adoc[leveloffset=+1]
+include::platform/con-gs-manage-collections.adoc[leveloffset=+2]
+include::platform/proc-gs-upload-collection.adoc[leveloffset=+2]
+//
+include::platform/con-gs-execution-env.adoc[leveloffset=+1]
+include::platform/proc-gs-use-base-execution-env.adoc[leveloffset=+2]
+include::platform/proc-controller-use-an-exec-env.adoc[leveloffset=+2]
+//
+include::platform/proc-gs-auto-op-projects.adoc[leveloffset=+1]
+include::platform/con-gs-auto-op-job-templates.adoc[leveloffset=+1]
+include::platform/proc-gs-auto-op-launch-template.adoc[leveloffset=+2]
+include::platform/con-gs-auto-op-about-inv.adoc[leveloffset=+1]
+include::platform/con-gs-auto-op-execute-inv.adoc[leveloffset=+2]
+include::platform/con-gs-automation-execution-jobs.adoc[leveloffset=+1]
+include::platform/proc-gs-auto-op-review-job-status.adoc[leveloffset=+2]
+include::platform/proc-gs-auto-op-review-job-output.adoc[leveloffset=+2]
+
+ifdef::parent-context-of-assembly-gs-auto-op[:context: {parent-context-of-assembly-gs-auto-op}]
+ifndef::parent-context-of-assembly-gs-auto-op[:!context:]
diff --git a/downstream/assemblies/platform/assembly-gs-key-functionality.adoc b/downstream/assemblies/platform/assembly-gs-key-functionality.adoc
new file mode 100644
index 0000000000..e8f74502cb
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gs-key-functionality.adoc
@@ -0,0 +1,34 @@
+ifdef::context[:parent-context-of-assembly-gs-key-functionality: {context}]
+
+:_mod-docs-content-type: ASSEMBLY
+
+ifndef::context[]
+[id="assembly-gs-key-functionality"]
+endif::[]
+ifdef::context[]
+[id="assembly-gs-key-functionality_{context}"]
+endif::[]
+
+:context: assembly-gs-key-functionality
+
+= Key functionality and concepts
+
+With {PlatformNameShort}, you can create, manage, and scale automation for your organization across users, teams, and regions. For more details, see the following functionality and concepts of {PlatformNameShort}.
+
+The release of {PlatformNameShort} {PlatformVers} introduces an updated, unified user interface (UI) that you can use to interact with and manage each part of the platform.
+
+include::snippets/snip-gateway-component-description.adoc[leveloffset=+1]
+include::platform/con-gw-activity-stream.adoc[leveloffset=+1]
+
+include::platform/con-gs-automation-execution.adoc[leveloffset=+1]
+include::platform/con-gs-automation-content.adoc[leveloffset=+1]
+include::platform/con-gs-automation-decisions.adoc[leveloffset=+1]
+include::platform/con-gs-automation-mesh.adoc[leveloffset=+1]
+include::platform/con-gs-ansible-lightspeed.adoc[leveloffset=+1]
+include::platform/con-gs-developer-tools.adoc[leveloffset=+1]
+include::platform/ref-gs-install-config.adoc[leveloffset=+1]
+include::platform/con-gs-dashboard-components.adoc[leveloffset=+1]
+include::platform/con-gs-final-set-up.adoc[leveloffset=+1]
+
+ifdef::parent-context-of-assembly-gs-key-functionality[:context: {parent-context-of-assembly-gs-key-functionality}]
+ifndef::parent-context-of-assembly-gs-key-functionality[:!context:]
diff --git a/downstream/assemblies/platform/assembly-gs-platform-admin.adoc b/downstream/assemblies/platform/assembly-gs-platform-admin.adoc
new file mode 100644
index 0000000000..08337dba71
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gs-platform-admin.adoc
@@ -0,0 +1,21 @@
+[id="assembly-gs-platform-admin"]
+
+= Getting started as a platform administrator
+
+As a platform administrator, you can use {PlatformNameShort} to enable your users and teams to develop and run automation.
+
+This guide walks you through the basic steps to get set up as an administrator for {PlatformNameShort}, including configuring and maintaining the platform for users.
+
+To get started as an administrator, see the following:
+
+* xref:proc-gs-logging-in[Logging in for the first time]
+* xref:con-gs-config-authentication[Configure authentication]
+* xref:con-gs-manage-RBAC[Managing user access with role-based access control]
+
+include::platform/proc-gs-logging-in.adoc[leveloffset=+1]
+include::platform/con-gs-config-authentication.adoc[leveloffset=+1]
+include::platform/con-gs-manage-RBAC.adoc[leveloffset=+1]
+include::platform/proc-controller-create-organization.adoc[leveloffset=+1]
+include::platform/proc-controller-creating-a-team.adoc[leveloffset=+1]
+include::platform/proc-gs-platform-admin-create-user.adoc[leveloffset=+1]
+include::platform/proc-gs-social-auth-github.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-gw-config-authentication-type.adoc b/downstream/assemblies/platform/assembly-gw-config-authentication-type.adoc
new file mode 100644
index 0000000000..143909d7b3
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gw-config-authentication-type.adoc
@@ -0,0 +1,60 @@
+ifdef::context[:parent-context: {context}]
+
+:_mod-docs-content-type: ASSEMBLY
+
+[id="gw-config-authentication-type"]
+
+= Configuring an authentication type
+
+{PlatformNameShort} provides multiple authenticator plugins that you can configure to simplify the login experience for your organization.
+The following authenticator plugins are provided:
+
+* xref:gw-local-authentication[Local]
+* xref:controller-set-up-LDAP[LDAP]
+* xref:controller-set-up-SAML[SAML]
+* xref:controller-set-up-tacacs[TACACS+]
+* xref:controller-set-up-radius[Radius]
+* xref:controller-set-up-azure[Azure]
+* xref:proc-controller-google-oauth2-settings[Google OAuth]
+* xref:controller-set-up-generic-oidc[Generic OIDC]
+* xref:gw-keycloak-authentication[Keycloak]
+* xref:proc-controller-github-settings[GitHub]
+* xref:proc-controller-github-organization-settings[GitHub organization]
+* xref:proc-controller-github-team-settings[GitHub team]
+* xref:proc-controller-github-enterprise-settings[GitHub enterprise]
+* xref:proc-controller-github-enterprise-org-settings[GitHub enterprise organization]
+* xref:proc-controller-github-enterprise-team-settings[GitHub enterprise team]
+
+include::platform/proc-gw-local-authentication.adoc[leveloffset=+1]
+
+include::platform/proc-controller-set-up-LDAP.adoc[leveloffset=+1]
+
+include::platform/proc-controller-set-up-SAML.adoc[leveloffset=+1]
+
+include::platform/proc-controller-configure-transparent-SAML.adoc[leveloffset=+2]
+
+include::platform/proc-controller-set-up-tacacs+.adoc[leveloffset=+1]
+
+include::platform/proc-controller-set-up-azure.adoc[leveloffset=+1]
+
+include::platform/proc-controller-google-oauth2-settings.adoc[leveloffset=+1]
+
+include::platform/proc-controller-set-up-generic-oidc.adoc[leveloffset=+1]
+
+include::platform/proc-gw-config-keycloak-settings.adoc[leveloffset=+1]
+
+include::platform/proc-controller-github-settings.adoc[leveloffset=+1]
+
+include::platform/proc-controller-github-organization-settings.adoc[leveloffset=+1]
+
+include::platform/proc-controller-github-team-settings.adoc[leveloffset=+1]
+
+include::platform/proc-controller-github-enterprise-settings.adoc[leveloffset=+1]
+
+include::platform/proc-controller-github-enterprise-org-settings.adoc[leveloffset=+1]
+
+include::platform/proc-controller-github-enterprise-team-settings.adoc[leveloffset=+1]
+
+include::platform/proc-controller-set-up-radius.adoc[leveloffset=+1]
+
+ifdef::parent-context[:context: {parent-context}]
+ifndef::parent-context[:!context:]
diff --git a/downstream/assemblies/platform/assembly-gw-configure-authentication.adoc b/downstream/assemblies/platform/assembly-gw-configure-authentication.adoc
new file mode 100644
index 0000000000..8fdb6c559e
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gw-configure-authentication.adoc
@@ -0,0 +1,35 @@
+ifdef::context[:parent-context: {context}]
+
+:_mod-docs-content-type: ASSEMBLY
+
+[id="gw-configure-authentication"]
+
+= Configuring authentication in {PlatformNameShort}
+
+Using the authentication settings in {PlatformNameShort}, you can set up a simplified login through several authentication methods, such as LDAP and SAML.
+Depending on the authentication method you select, you must enter different information to complete the configuration. Be sure to include all the information that your configuration requires.
+
+== Prerequisites
+
+* A running installation of {PlatformNameShort} {PlatformVers}
+* A running instance of your authentication source
+* Administrator rights for {PlatformNameShort}
+* Any connection information needed to connect {PlatformNameShort} {PlatformVers} to your source (see individual authentication types for details).
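+
+As an optional illustration, an authenticator can also be created with the `ansible.platform` collection rather than through the UI procedures that follow. This is a sketch only: verify the module name and its parameters against your installed collection version, and treat every value shown here as a placeholder.
+
+[source,yaml]
+----
+- name: Create an LDAP authenticator (illustrative values only)
+  ansible.platform.authenticator:
+    name: Example LDAP
+    type: ansible_base.authentication.authenticator_plugins.ldap
+    enabled: true
+    configuration:
+      SERVER_URI:
+        - ldaps://ldap.example.com:636       # placeholder server
+      BIND_DN: cn=admin,dc=example,dc=com    # placeholder bind DN
+      BIND_PASSWORD: "{{ vaulted_bind_password }}"  # placeholder secret
+----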
+
+include::platform/con-gw-pluggable-authentication.adoc[leveloffset=+1]
+
+include::platform/con-gw-create-authentication.adoc[leveloffset=+1]
+
+include::platform/proc-gw-select-auth-type.adoc[leveloffset=+2]
+
+include::platform/proc-gw-configure-auth-details.adoc[leveloffset=+2]
+
+include::platform/proc-gw-define-rules-triggers.adoc[leveloffset=+2]
+
+include::platform/proc-gw-adjust-mapping-order.adoc[leveloffset=+2]
+
+include::platform/proc-gw-review-auth-settings.adoc[leveloffset=+2]
+
+ifdef::parent-context[:context: {parent-context}]
+ifndef::parent-context[:!context:]
+
\ No newline at end of file
diff --git a/downstream/assemblies/platform/assembly-gw-dashboard.adoc b/downstream/assemblies/platform/assembly-gw-dashboard.adoc
new file mode 100644
index 0000000000..bc24dd9f72
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gw-dashboard.adoc
@@ -0,0 +1,18 @@
+ifdef::context[:parent-context: {context}]
+
+:_mod-docs-content-type: ASSEMBLY
+
+[id="gw-dashboard_{context}"]
+= {PlatformNameShort} dashboard
+
+:context: gw-dashboard
+
+[role="_abstract"]
+The {PlatformNameShort} dashboard provides automation management and monitoring capabilities. You can use it to administer and configure automation functions, and to view recent job activity and related performance statistics.
+
+include::platform/con-gw-dash-features.adoc[leveloffset=+1]
+
+include::platform/con-gw-dash-components.adoc[leveloffset=+1]
+
+ifdef::parent-context[:context: {parent-context}]
+ifndef::parent-context[:!context:]
diff --git a/downstream/assemblies/platform/assembly-gw-managing-access.adoc b/downstream/assemblies/platform/assembly-gw-managing-access.adoc
new file mode 100644
index 0000000000..c21124dd11
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gw-managing-access.adoc
@@ -0,0 +1,19 @@
+:_mod-docs-content-type: ASSEMBLY
+
+[id="gw-managing-access"]
+
+= Managing access with role-based access control
+
+:context: gw-manage-rbac
+
+Role-based access control (RBAC) restricts user access based on the role that they are assigned within an organization in {PlatformNameShort}. The roles in RBAC refer to the levels of access that users have to {PlatformNameShort} components and resources.
+
+You can control what users can do with the components of {PlatformNameShort} at a broad or granular level, depending on your RBAC policy. You can designate whether a user is a system administrator or a normal user, and align roles and access permissions with their positions within the organization.
+
+Roles can be defined with multiple permissions that can then be assigned to resources, teams, and users. The permissions that make up a role dictate what the assigned role allows. Permissions are allocated with only the access needed for a user to perform the tasks appropriate for their role.
+
+include::assembly-controller-organizations.adoc[leveloffset=+1]
+include::assembly-controller-teams.adoc[leveloffset=+1]
+include::assembly-controller-users.adoc[leveloffset=+1]
+include::assembly-gw-resources.adoc[leveloffset=+1]
+
\ No newline at end of file
diff --git a/downstream/assemblies/platform/assembly-gw-managing-authentication.adoc b/downstream/assemblies/platform/assembly-gw-managing-authentication.adoc
new file mode 100644
index 0000000000..d2ee680bea
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gw-managing-authentication.adoc
@@ -0,0 +1,22 @@
+ifdef::context[:parent-context: {context}]
+
+:_mod-docs-content-type: ASSEMBLY
+
+[id="gw-managing-authentication"]
+
+= Managing authentication in {PlatformNameShort}
+
+After you have configured your authentication settings, you can view a list of the authenticators configured on the system, and search, sort, and view the details of each one.
+
+include::platform/proc-gw-authentication-list-view.adoc[leveloffset=+1]
+
+include::platform/proc-gw-searching-authenticator.adoc[leveloffset=+1]
+
+include::platform/proc-gw-display-auth-details.adoc[leveloffset=+1]
+
+include::platform/proc-gw-edit-authenticator.adoc[leveloffset=+1]
+
+include::platform/proc-gw-delete-authenticator.adoc[leveloffset=+1]
+
+ifdef::parent-context[:context: {parent-context}]
+ifndef::parent-context[:!context:]
diff --git a/downstream/assemblies/platform/assembly-gw-mapping.adoc b/downstream/assemblies/platform/assembly-gw-mapping.adoc
new file mode 100644
index 0000000000..d9abde4cc2
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gw-mapping.adoc
@@ -0,0 +1,25 @@
+:_mod-docs-content-type: ASSEMBLY
+
+[id="gw-mapping"]
+
+= Mapping
+
+You can configure authenticator maps to control which users are allowed into the {PlatformNameShort} server, and to place users into {PlatformNameShort} organizations or teams based on their attributes (such as username and email address) or the groups they belong to.
+
+Authenticator maps allow you to add conditions that must be met before a user is given or denied access to a resource type. Authenticator maps are associated with an authenticator and are given an order. The maps are processed in that order when the user logs in. You can think of them as being similar to firewall rules or mail filters.
+
+include::platform/con-gw-authenticator-map-types.adoc[leveloffset=+1]
+
+include::platform/con-gw-authenticator-map-triggers.adoc[leveloffset=+1]
+
+include::platform/con-gw-authenticator-map-examples.adoc[leveloffset=+1]
+
+include::platform/proc-gw-allow-mapping.adoc[leveloffset=+1]
+
+include::platform/ref-controller-organization-mapping.adoc[leveloffset=+1]
+
+include::platform/ref-controller-team-mapping.adoc[leveloffset=+1]
+
+include::platform/proc-gw-role-mapping.adoc[leveloffset=+1]
+
+include::platform/proc-gw-superuser-mapping.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-gw-resources.adoc b/downstream/assemblies/platform/assembly-gw-resources.adoc
new file mode 100644
index 0000000000..5762e69f02
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gw-resources.adoc
@@ -0,0 +1,16 @@
+ifdef::context[:parent-context: {context}]
+
+:_mod-docs-content-type: ASSEMBLY
+
+[id="assembly-gw-resources"]
+
+= Resources
+
+You can manage user access to {PlatformNameShort} resources and what users can do with those resources.
Users are granted access through the roles they are assigned or through roles they inherit through the role hierarchy, for example, through team membership. {PlatformNameShort} resources differ depending on the functionality you are configuring. For example, resources can be job templates and projects for automation execution, or decision environments and rulebook activations for automation decisions.
+
+include::platform/proc-gw-team-access-resources.adoc[leveloffset=+1]
+
+include::platform/proc-gw-user-access-resources.adoc[leveloffset=+1]
+
+ifdef::parent-context[:context: {parent-context}]
+ifndef::parent-context[:!context:]
diff --git a/downstream/assemblies/platform/assembly-gw-roles.adoc b/downstream/assemblies/platform/assembly-gw-roles.adoc
new file mode 100644
index 0000000000..50a859b546
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gw-roles.adoc
@@ -0,0 +1,13 @@
+:_mod-docs-content-type: ASSEMBLY
+
+[id="assembly-gw-roles"]
+
+= Roles
+
+Roles are units of organization in {PlatformName}. When you assign a role to a team or user, you grant access to use, read, or write credentials. Because roles are discrete, reusable units, they enable you to share behavior among resources, or with other users. All access that is granted to use, read, or write credentials is handled through roles, and roles are defined for a resource.
+
+include::platform/proc-gw-roles.adoc[leveloffset=+1]
+include::platform/proc-gw-create-roles.adoc[leveloffset=+1]
+include::platform/proc-gw-edit-roles.adoc[leveloffset=+1]
+include::platform/proc-gw-delete-roles.adoc[leveloffset=+1]
+
diff --git a/downstream/assemblies/platform/assembly-gw-settings.adoc b/downstream/assemblies/platform/assembly-gw-settings.adoc
new file mode 100644
index 0000000000..763df5ef21
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-gw-settings.adoc
@@ -0,0 +1,27 @@
+:_mod-docs-content-type: ASSEMBLY
+
+[id="assembly-gw-settings"]
+
+= Configuring {PlatformNameShort}
+
+You can configure {PlatformNameShort} from the *Settings* menu using the following selections:
+
+* *Subscriptions*
+* *{GatewayStart}*
+* *User Preferences*
+* *Troubleshooting*
+
+[NOTE]
+====
+The other selections available from the *Settings* menu are specific to automation execution. For more information, see link:{URLControllerAdminGuide}/index#controller-config[{TitleControllerAdminGuide}].
+==== + +include::platform/proc-controller-configure-subscriptions.adoc[leveloffset=+1] +include::platform/proc-settings-platform-gateway.adoc[leveloffset=+1] +include::platform/proc-settings-gw-security-options.adoc[leveloffset=+2] +include::platform/proc-settings-gw-session-options.adoc[leveloffset=+2] +include::platform/proc-settings-gw-password-security.adoc[leveloffset=+2] +include::platform/proc-settings-gw-custom-login.adoc[leveloffset=+2] +include::platform/proc-settings-gw-additional-options.adoc[leveloffset=+2] +include::platform/proc-settings-user-preferences.adoc[leveloffset=+1] +include::platform/proc-settings-troubleshooting.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-gw-token-based-authentication.adoc b/downstream/assemblies/platform/assembly-gw-token-based-authentication.adoc new file mode 100644 index 0000000000..7d74bfc073 --- /dev/null +++ b/downstream/assemblies/platform/assembly-gw-token-based-authentication.adoc @@ -0,0 +1,29 @@ +ifdef::context[:parent-context: {context}] + +:_mod-docs-content-type: ASSEMBLY + +[id="gw-token-based-authentication"] + += Configuring access to external applications with token-based authentication + +Token-based authentication permits authentication of third-party tools and services with the platform through integrated OAuth 2 token support, and allows you to access external applications without having to store your password on disk. + +For more information on the OAuth2 specification, see link:https://datatracker.ietf.org/doc/html/rfc6749[The OAuth 2.0 Authorization Framework]. + +For more information on using the `manage` utility to create tokens, see xref:ref-controller-token-session-management[Token and session management]. + +include::assembly-controller-applications.adoc[leveloffset=+1] +include::platform/proc-controller-apps-create-tokens.adoc[leveloffset=+1] +include::platform/ref-controller-app-token-functions.adoc[leveloffset=+2] +include::platform/ref-controller-refresh-existing-token.adoc[leveloffset=+3] +include::platform/ref-controller-revoke-access-token.adoc[leveloffset=+3] +include::platform/ref-controller-token-session-management.adoc[leveloffset=+2] +include::platform/ref-controller-create-oauth2-token.adoc[leveloffset=+3] +include::platform/ref-controller-revoke-oauth2-token.adoc[leveloffset=+3] +include::platform/ref-controller-clear-tokens.adoc[leveloffset=+3] +//[emcwhinn - Temporarily hiding expire sessions module as it does not yet exist for gateway as per AAP-35735] +//include::platform/ref-controller-expire-sessions.adoc[leveloffset=+3] +include::platform/ref-controller-clear-sessions.adoc[leveloffset=+3] + +ifdef::parent-context[:context: {parent-context}] +ifndef::parent-context[:!context:] \ No newline at end of file diff --git a/downstream/assemblies/platform/assembly-horizontal-scaling.adoc b/downstream/assemblies/platform/assembly-horizontal-scaling.adoc new file mode 100644 index 0000000000..6d0262b243 --- /dev/null +++ b/downstream/assemblies/platform/assembly-horizontal-scaling.adoc @@ -0,0 +1,16 @@ +ifdef::context[:parent-context: {context}] + +[id="assembly-horizontal-scaling"] += Horizontal Scaling in {PlatformName} + +//:context: activate-aap + +[role="_abstract"] +You can set up multi-node deployments for components across {PlatformNameShort}. Whether you require horizontal scaling for {MenuTopAE}, {MenuAD}, or {AutomationMesh}, you can scale your deployments based on your organization’s needs. 
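+
+For example, for {EDAcontroller}, scaling out typically amounts to listing more than one host in the relevant installer inventory group. The following is a minimal sketch only, shown in YAML inventory form; the group name `automationedacontroller` and the host names are assumptions to verify against your installer documentation:
+
+[source,yaml]
+----
+all:
+  children:
+    automationedacontroller:   # assumed group name for {EDAcontroller} nodes
+      hosts:
+        eda1.example.com:      # hypothetical host
+        eda2.example.com:      # adding a second host scales the deployment horizontally
+----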
+
+include::platform/con-hs-eda-controller.adoc[leveloffset=+1]
+include::platform/con-hs-eda-sizing-scaling.adoc[leveloffset=+2]
+include::platform/proc-hs-eda-setup.adoc[leveloffset=+2]
+
+ifdef::parent-context[:context: {parent-context}]
+ifndef::parent-context[:!context:]
diff --git a/downstream/assemblies/platform/assembly-install-aap-operator.adoc b/downstream/assemblies/platform/assembly-install-aap-operator.adoc
index 4f150282cb..84ae00b526 100644
--- a/downstream/assemblies/platform/assembly-install-aap-operator.adoc
+++ b/downstream/assemblies/platform/assembly-install-aap-operator.adoc
@@ -1,13 +1,24 @@
-
 ifdef::context[:parent-context: {context}]
+[id="install-aap-operator_{context}"]
-
-[id="assembly-install-aap-operator"]
-= Installing the {PlatformName} operator on {OCP}
+= Installing the {OperatorPlatformName} on {OCP}
 
 [role="_abstract"]
+When you install your {OperatorPlatformNameShort}, you can choose between a namespace-scoped operator and a cluster-scoped operator.
+This choice depends on the update channel you select, stable-2.x or cluster-scoped-2.x.
+
+A namespace-scoped operator is confined to one namespace, offering tighter security. A cluster-scoped operator spans multiple namespaces, which grants broader permissions.
+
+If you are managing multiple {PlatformNameShort} instances with the same {OperatorPlatformNameShort} version, use the cluster-scoped operator, which uses a single operator to manage all {PlatformNameShort} custom resources in your cluster.
+
+If you need multiple operator versions in the same cluster, you must use the namespace-scoped operator.
+The operator and the deployment share the same namespace.
+This can also be helpful when debugging, because the operator logs pertain only to custom resources in that namespace.
+
+For help with installing a namespace-scoped or cluster-scoped operator, see the following procedure.
+
 .Prerequisites
 
 * You have installed the {PlatformName} catalog in OperatorHub.
@@ -20,5 +31,6 @@ ifdef::context[:parent-context: {context}]
 include::platform/proc-install-aap-operator.adoc[leveloffset=+2]
 
+
 ifdef::parent-context[:context: {parent-context}]
 ifndef::parent-context[:!context:]
diff --git a/downstream/assemblies/platform/assembly-installing-aap-operator-cli.adoc b/downstream/assemblies/platform/assembly-installing-aap-operator-cli.adoc
index e2ba37cb62..945e5abe77 100644
--- a/downstream/assemblies/platform/assembly-installing-aap-operator-cli.adoc
+++ b/downstream/assemblies/platform/assembly-installing-aap-operator-cli.adoc
@@ -1,20 +1,12 @@
-// Used in
-// titles/aap-operator-installation/
-////
-Retains the context of the parent assembly if this assembly is nested within another assembly.
-For more information about nesting assemblies, see: https://redhat-documentation.github.io/modular-docs/#nesting-assemblies
-See also the complementary step on the last line of this file.
-////
-
 ifdef::context[:parent-context: {context}]
 
-[id="installing-aap-operator-cli"]
-= Installing {OperatorPlatform} from the {OCPShort} CLI
+[id="installing-aap-operator-cli_{context}"]
+= Installing {OperatorPlatformName} from the {OCPShort} CLI
 
 :context: installing-aap-operator-cli
 
 [role="_abstract"]
-Use these instructions to install the {OperatorPlatform} on {OCP} from the {OCPShort} command-line interface (CLI) using the [command]`oc` command.
+Use these instructions to install the {OperatorPlatformNameShort} on {OCP} from the {OCPShort} command-line interface (CLI) using the [command]`oc` command.
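+
+Installing from the CLI typically comes down to applying an Operator Lifecycle Manager `Subscription` manifest with `oc apply`. The following is a minimal sketch only: the channel names reflect the channel discussion earlier, while the package name, catalog source, and namespace are assumptions that you must verify against your cluster:
+
+[source,yaml]
+----
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: ansible-automation-platform           # assumed Subscription name
+  namespace: aap                              # assumed target namespace
+spec:
+  channel: stable-2.x                         # or cluster-scoped-2.x, as discussed earlier
+  name: ansible-automation-platform-operator  # assumed package name; verify in your catalog
+  source: redhat-operators                    # assumed catalog source
+  sourceNamespace: openshift-marketplace
+----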
== Prerequisites
diff --git a/downstream/assemblies/platform/assembly-installing-controller-operator.adoc b/downstream/assemblies/platform/assembly-installing-controller-operator.adoc
index 9bff54c4c2..d20b9cc911 100644
--- a/downstream/assemblies/platform/assembly-installing-controller-operator.adoc
+++ b/downstream/assemblies/platform/assembly-installing-controller-operator.adoc
@@ -8,14 +8,13 @@ ifdef::context[:parent-context: {context}]
 [id="installing-controller-operator"]
-= Installing and configuring {ControllerName} on {OCP} web console
-
+= Configuring {ControllerName} on {OCP} web console
 :context: installing-contr-operator
 
 [role="_abstract"]
-You can use these instructions to install the {ControllerName} operator on {OCP}, specify custom resources, and deploy {PlatformNameShort} with an external database.
+You can use these instructions to configure the {ControllerName} operator on {OCP}, specify custom resources, and deploy {PlatformNameShort} with an external database.
 
 {ControllerNameStart} configuration can be done through the {ControllerName} extra_settings or directly in the user interface after deployment. However, configurations made in extra_settings take precedence over settings made in the user interface.
@@ -30,21 +29,22 @@
 When an instance of {ControllerName} is removed, the associated PVCs are not automatically deleted.
 
 == Prerequisites
 
 * You have installed the {PlatformName} catalog in Operator Hub.
-* For Controller, a default StorageClass must be configured on the cluster for the operator to dynamically create needed PVCs. This is not necessary if an external PostgreSQL database is configured.
+* For {ControllerName}, a default StorageClass must be configured on the cluster for the operator to dynamically create needed PVCs. This is not necessary if an external PostgreSQL database is configured.
 * For Hub, a StorageClass that supports ReadWriteMany must be available on the cluster to dynamically create the PVCs needed for the content, Redis, and API pods. If it is not the default StorageClass on the cluster, you can specify it when creating your AutomationHub object.
 
-== Installing the {ControllerName} operator
-Use this procedure to install the {ControllerName} operator.
+//Not relevant for 2.5 EA, commenting out section [gmurray]
+//== Installing the {ControllerName} operator
+//Use this procedure to install the {ControllerName} operator.
 
-.Procedure
+//.Procedure
 
-. Navigate to menu:Operators[Installed Operators], then click on the *Ansible Automation Platform* operator.
-. Locate the *Automation controller* tab, then click btn:[Create instance].
+//. Navigate to menu:Operators[Installed Operators], then click on the *Ansible Automation Platform* operator.
+//. Locate the *Automation controller* tab, then click btn:[Create instance].
 
-You can proceed with configuring the instance using either the Form View or YAML view.
+//You can proceed with configuring the instance using either the Form View or YAML view.
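+
+As a sketch of the precedence behavior described above, an extra_settings entry set on the custom resource overrides any later change to the same setting in the user interface. The API version and the setting shown are assumptions for illustration only:
+
+[source,yaml]
+----
+apiVersion: automationcontroller.ansible.com/v1beta1  # assumed API version
+kind: AutomationController
+metadata:
+  name: example-controller
+spec:
+  extra_settings:
+    - setting: MAX_PAGE_SIZE   # hypothetical setting; this value wins over a UI edit
+      value: "500"
+----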
-include::platform/proc-creating-controller-form-view.adoc[leveloffset=+2]
+//include::platform/proc-creating-controller-form-view.adoc[leveloffset=+2]
 include::platform/proc-configuring-controller-image-pull-policy.adoc[leveloffset=+2]
 include::platform/proc-configuring-controller-ldap-security.adoc[leveloffset=+2]
 include::platform/proc-configuring-controller-route-options.adoc[leveloffset=+2]
diff --git a/downstream/assemblies/platform/assembly-installing-hub-operator.adoc b/downstream/assemblies/platform/assembly-installing-hub-operator.adoc
index dbc6a7648e..9a8b9e9235 100644
--- a/downstream/assemblies/platform/assembly-installing-hub-operator.adoc
+++ b/downstream/assemblies/platform/assembly-installing-hub-operator.adoc
@@ -8,12 +8,12 @@ ifdef::context[:parent-context: {context}]
 [id="installing-hub-operator"]
-= Installing and configuring {HubName} on {OCP} web console
+= Configuring {HubName} on {OCP} web console
 
 :context: installing-hub-operator
 
 [role="_abstract"]
-You can use these instructions to install the {HubName} operator on {OCP}, specify custom resources, and deploy {PlatformNameShort} with an external database.
+You can use these instructions to configure the {HubName} operator on {OCP}, specify custom resources, and deploy {PlatformNameShort} with an external database.
 
 {HubNameStart} configuration can be done through the {HubName} pulp_settings or directly in the user interface after deployment. However, configurations made in pulp_settings take precedence over settings made in the user interface. Hub settings must always be set in lowercase on the Hub custom resource specification.
@@ -27,15 +27,16 @@
 When an instance of {HubName} is removed, the PVCs are not automatically deleted.
 
 == Prerequisites
 
-* You have installed the {PlatformName} operator in Operator Hub.
+* You have installed the {OperatorPlatformNameShort} in Operator Hub.
 
-== Installing the {HubName} operator
-Use this procedure to install the {HubName} operator.
+// commenting out below as encouraging users to use platform gateway for installation, only covering configuration here [gmurray]
+// == Installing the {HubName} operator
+// Use this procedure to install the {HubName} operator.
 
-.Procedure
+// .Procedure
 
-. Navigate to menu:Operators[Installed Operators].
-. Locate the *Automation hub* entry, then click btn:[Create instance].
+// . Navigate to menu:Operators[Installed Operators].
+// . Locate the *Automation hub* entry, then click btn:[Create instance].
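+
+As a sketch of the pulp_settings behavior described above, note the lowercase key on the custom resource specification. The API version and the setting shown are assumptions for illustration only:
+
+[source,yaml]
+----
+apiVersion: automationhub.ansible.com/v1beta1  # assumed API version
+kind: AutomationHub
+metadata:
+  name: example-hub
+spec:
+  pulp_settings:
+    redirect_to_object_storage: false  # lowercase key, as required; overrides a UI edit
+----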
include::platform/con-storage-options-for-operator-installation-on-ocp.adoc[leveloffset=+2] include::platform/proc-provision-ocp-storage-with-readwritemany.adoc[leveloffset=+3] @@ -43,7 +44,6 @@ include::platform/proc-provision-ocp-storage-amazon-s3.adoc[leveloffset=+3] include::platform/proc-provision-ocp-storage-azure-blob.adoc[leveloffset=+3] include::platform/proc-hub-route-options.adoc[leveloffset=+2] include::platform/proc-hub-ingress-options.adoc[leveloffset=+2] -include::platform/proc-configure-ldap-hub-ocp.adoc[leveloffset=+1] include::platform/proc-access-hub-operator-ui.adoc[leveloffset=+1] include::platform/proc-operator-external-db-hub.adoc[leveloffset=+1] include::platform/proc-enable-hstore-extension.adoc[leveloffset=+2] diff --git a/downstream/assemblies/platform/assembly-inventory-file-importing.adoc b/downstream/assemblies/platform/assembly-inventory-file-importing.adoc index ef1d6c8d17..a431b55f17 100644 --- a/downstream/assemblies/platform/assembly-inventory-file-importing.adoc +++ b/downstream/assemblies/platform/assembly-inventory-file-importing.adoc @@ -2,7 +2,7 @@ = Inventory File Importing -With {ControllerNameStart} you can select an inventory file from source control, rather than creating one from scratch. +With {ControllerName} you can select an inventory file from source control, rather than creating one from scratch. //This function is the same as for custom inventory scripts, except that the contents are obtained from source control instead of editing their contents in a browser. The files are non-editable, and as inventories are updated at the source, the inventories within the projects are also updated accordingly, including the `group_vars` and `host_vars` files or directory associated with them. SCM types can consume both inventory files and scripts. @@ -21,7 +21,7 @@ For example, if importing from a sourced `.ini` file, you can add the following Similarly, group descriptions also default to _imported_, but can also be overridden by `_awx_description`. -To use old inventory scripts in source control, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-inventories#ref-controller-export-old-scripts[Export old inventory scripts] in the _{ControllerUG}_. +To use old inventory scripts in source control, see link:{URLControllerUserGuide}/controller-inventories#ref-controller-export-old-scripts[Export old inventory scripts] in _{ControllerUG}_. 
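+
+As an illustration of the description override described above, the following sketch shows a sourced inventory in YAML form (an `.ini` file works the same way); the host and group names are hypothetical:
+
+[source,yaml]
+----
+all:
+  children:
+    webservers:                                 # hypothetical group
+      vars:
+        _awx_description: Production web tier  # overrides the default group description
+      hosts:
+        web1.example.com:
+          _awx_description: Primary web server # overrides the default host description
+----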
//include::platform/con-controller-custom-dynamic-inv-scripts.adoc[leveloffset=+1] include::platform/ref-controller-scm-inv-source-fields.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-inventory-introduction.adoc b/downstream/assemblies/platform/assembly-inventory-introduction.adoc index 82c3b60fb8..9e7a2d7228 100644 --- a/downstream/assemblies/platform/assembly-inventory-introduction.adoc +++ b/downstream/assemblies/platform/assembly-inventory-introduction.adoc @@ -17,9 +17,11 @@ The following table shows possible locations: [cols="30%,70%",options="header"] |==== | Installer | Location -| *Bundle tar* | `/ansible-automation-platform-setup-bundle-` -| *Non-bundle tar* | `/ansible-automation-platform-setup-` | *RPM* | `/opt/ansible-automation-platform/installer` +| *RPM bundle tar* | `/ansible-automation-platform-setup-bundle-` +| *RPM non-bundle tar* | `/ansible-automation-platform-setup-` +| *Container bundle tar* | `/ansible-automation-platform-containerized-setup-bundle-` +| *Container non-bundle tar* | `/ansible-automation-platform-containerized-setup-` |==== You can verify the hosts in your inventory using the command: @@ -34,15 +36,20 @@ ansible all -i Controller node B ++ +Controller node A --> Controller node C ++ +Controller node B --> Controller node C ++ +You can force the listener by setting ++ +`receptor_listener=True` ++ +However, a connection Controller B --> A is likely to be rejected as that connection already exists. ++ +This means that nothing connects to Controller A as Controller A is creating the connections to the other nodes, and the following command does not return anything on Controller A: ++ +`[root@controller1 ~]# ss -ntlp | grep 27199 [root@controller1 ~]#` +==== .{InsightsName} [options="header"] @@ -244,10 +164,10 @@ a|Open *only* if the internal database is used along with another component. Oth |link:https://console.redhat.com[https://console.redhat.com:443] |General account services, subscriptions |link:https://catalog.redhat.com[https://catalog.redhat.com:443] |Indexing execution environments |link:https://sso.redhat.com[https://sso.redhat.com:443] |TCP -|link:https://automation-hub-prd.s3.amazonaws.com[https://automation-hub-prd.s3.amazonaws.com:443] + -link:https://automation-hub-prd.s3.us-east-2.amazonaws.com/[https://automation-hub-prd.s3.us-east-2.amazonaws.com:443/]| Firewall access +|\https://automation-hub-prd.s3.amazonaws.com + +\https://automation-hub-prd.s3.us-east-2.amazonaws.com| Firewall access |link:https://galaxy.ansible.com[https://galaxy.ansible.com:443] |Ansible Community curated Ansible content -|link:https://ansible-galaxy-ng.s3.dualstack.us-east-1.amazonaws.com[https://ansible-galaxy-ng.s3.dualstack.us-east-1.amazonaws.com:443] | Dual Stack IPv6 endpoint for Community curated Ansible content repository +|\https://ansible-galaxy-ng.s3.dualstack.us-east-1.amazonaws.com | Dual Stack IPv6 endpoint for Community curated Ansible content repository |link:https://registry.redhat.io[https://registry.redhat.io:443] |Access to container images provided by Red Hat and partners |link:https://cert.console.redhat.com[https://cert.console.redhat.com:443] |Red Hat and partner curated Ansible Collections |=== @@ -265,16 +185,28 @@ link:https://automation-hub-prd.s3.us-east-2.amazonaws.com/[https://automation-h [IMPORTANT] ==== -Image manifests and filesystem blobs are served directly from `registry.redhat.io`. -However, from 1 May 2023, filesystem blobs are served from `quay.io` instead. 
-To avoid problems pulling container images, you must enable outbound connections to the listed `quay.io` hostnames.
+As of *April 1st, 2025*, `quay.io` is adding three additional endpoints. As a result, customers must adjust the allowlists and blocklists within their firewall systems to include the following endpoints:
+
+* `cdn04.quay.io`
+* `cdn05.quay.io`
+* `cdn06.quay.io`
+
+To avoid problems pulling container images, customers must allow outbound TCP connections (ports 80 and 443) to the following hostnames:
+
+* `cdn.quay.io`
+* `cdn01.quay.io`
+* `cdn02.quay.io`
+* `cdn03.quay.io`
+* `cdn04.quay.io`
+* `cdn05.quay.io`
+* `cdn06.quay.io`
 
-This change should be made to any firewall configuration that specifically enables outbound connections to `registry.redhat.io`.
+This change should be made to any firewall configuration that specifically enables outbound connections to `registry.redhat.io` or `registry.access.redhat.com`. Use the hostnames instead of IP addresses when configuring firewall rules.
 
-After making this change, you can continue to pull images from `registry.redhat.io`.
-You do not require a `quay.io` login, or need to interact with the `quay.io` registry directly in any way to continue pulling Red Hat container images.
+After making this change, you can continue to pull images from `registry.redhat.io` or `registry.access.redhat.com`. You do not require a `quay.io` login, nor do you need to interact with the `quay.io` registry directly in any way, to continue pulling Red Hat container images.
 
-For more information, see link:https://access.redhat.com/articles/6999582[Firewall changes for container image pulls].
+For more information, see link:https://access.redhat.com/articles/7084334[Firewall changes for container image pulls 2024/2025].
 ====
+// emurtoug: This note is also included in the Managing content guide
\ No newline at end of file
diff --git a/downstream/assemblies/platform/assembly-operator-add-execution-nodes.adoc b/downstream/assemblies/platform/assembly-operator-add-execution-nodes.adoc
index 8bf218fe42..9f54774028 100644
--- a/downstream/assemblies/platform/assembly-operator-add-execution-nodes.adoc
+++ b/downstream/assemblies/platform/assembly-operator-add-execution-nodes.adoc
@@ -2,11 +2,11 @@ ifdef::context[:parent-context: {context}]
 [id="operator-add-execution-nodes_{context}"]
-= Adding execution nodes to {PlatformNameShort} Operator
+= Adding execution nodes to {OperatorPlatformName}
 
 :context: operator-upgrade
 
-You can enable the {PlatformNameShort} Operator with execution nodes by downloading and installing the install bundle.
+You can enable the {OperatorPlatformNameShort} with execution nodes by downloading and installing the install bundle.
include::platform/proc-add-operator-execution-nodes.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-operator-debugging.adoc b/downstream/assemblies/platform/assembly-operator-debugging.adoc
new file mode 100644
index 0000000000..eeb83846f0
--- /dev/null
+++ b/downstream/assemblies/platform/assembly-operator-debugging.adoc
@@ -0,0 +1,15 @@
+ifdef::context[:parent-context: {context}]
+
+:context: operator-debugging
+
+
+[id="operator-debugging"]
+= Debugging the {OperatorPlatformName}
+
+include::platform/con-operator-ansible-verbosity.adoc[leveloffset=+1]
+
+
+
+
+ifdef::parent-context[:context: {parent-context}]
+ifndef::parent-context[:!context:]
\ No newline at end of file
diff --git a/downstream/assemblies/platform/assembly-operator-install-planning.adoc b/downstream/assemblies/platform/assembly-operator-install-planning.adoc
index 0f20c18ffc..a35c900359 100644
--- a/downstream/assemblies/platform/assembly-operator-install-planning.adoc
+++ b/downstream/assemblies/platform/assembly-operator-install-planning.adoc
@@ -4,7 +4,7 @@ ifdef::context[:parent-context: {context}]
 [id="operator-install-planning"]
-= Planning your {PlatformName} operator installation on {OCP}
+= Planning your {OperatorPlatformName} installation on {OCP}
 
 :context: operator-install-planning
 
 [role="_abstract"]
 {PlatformName} is supported on both Red Hat Enterprise Linux and Red Hat Openshift.
-OpenShift operators help install and automate day-2 operations of complex, distributed software on {OCP}. The {OperatorPlatform} enables you to deploy and manage {PlatformNameShort} components on {OCP}.
+OpenShift operators help install and automate day-2 operations of complex, distributed software on {OCP}. The {OperatorPlatformNameShort} enables you to deploy and manage {PlatformNameShort} components on {OCP}.
 
 You can use this section to help plan your {PlatformName} installation on your {OCP} environment. Before installing, review the supported installation scenarios to determine which meets your requirements.
diff --git a/downstream/assemblies/platform/assembly-operator-upgrade.adoc b/downstream/assemblies/platform/assembly-operator-upgrade.adoc
index c8abe644bf..bfb90b8105 100644
--- a/downstream/assemblies/platform/assembly-operator-upgrade.adoc
+++ b/downstream/assemblies/platform/assembly-operator-upgrade.adoc
@@ -3,19 +3,21 @@ ifdef::context[:parent-context: {context}]
 [id="operator-upgrade_{context}"]
-= Upgrading {OperatorPlatform} on {OCPShort}
+= Upgrading {OperatorPlatformName} on {OCPShort}
 
 :context: operator-upgrade
 
 [role="_abstract"]
-The {OperatorPlatform} simplifies the installation, upgrade and deployment of new {PlatformName} instances in your {OCPShort} environment.
+The {OperatorPlatformNameShort} simplifies the installation, upgrade, and deployment of new {PlatformName} instances in your {OCPShort} environment.
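+
+When an upgrade involves moving to a new channel (see the channel upgrade section that follows), the change is ultimately reflected in the operator's `Subscription` resource. A minimal sketch, with assumed names and channel versions:
+
+[source,yaml]
+----
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: ansible-automation-platform           # assumed Subscription name
+  namespace: aap                              # assumed namespace
+spec:
+  channel: stable-2.5                         # switched from an older channel, for example stable-2.4
+  name: ansible-automation-platform-operator  # assumed package name
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
+----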
+include::platform/con-operator-upgrade-overview.adoc[leveloffset=+1] include::platform/con-operator-upgrade-considerations.adoc[leveloffset=+1] include::platform/con-operator-upgrade-prereq.adoc[leveloffset=+1] +include::platform/con-operator-channel-upgrade.adoc[leveloffset=+1] include::platform/proc-operator-upgrade.adoc[leveloffset=+1] - - +include::platform/proc-operator-create_crs.adoc[leveloffset=+1] +include::assembly-aap-post-upgrade.adoc[leveloffset=+1] ifdef::parent-context[:context: {parent-context}] ifndef::parent-context[:!context:] diff --git a/downstream/assemblies/platform/assembly-planning-installation.adoc b/downstream/assemblies/platform/assembly-planning-installation.adoc index f46526e3aa..b4e2bfa087 100644 --- a/downstream/assemblies/platform/assembly-planning-installation.adoc +++ b/downstream/assemblies/platform/assembly-planning-installation.adoc @@ -10,7 +10,7 @@ ifdef::context[:parent-context: {context}] [role="_abstract"] {PlatformName} is supported on both {RHEL} and Red Hat OpenShift. Use this guide to plan your {PlatformName} installation on {RHEL}. -To install {PlatformName} on your {OCP} environment, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/deploying_the_red_hat_ansible_automation_platform_operator_on_openshift_container_platform/index[Deploying the Red Hat Ansible Automation Platform operator on OpenShift Container Platform]. +To install {PlatformName} on your {OCP} environment, see link:{URLOperatorInstallation}[{TitleOperatorInstallation}]. ifdef::parent-context[:context: {parent-context}] ifndef::parent-context[:!context:] diff --git a/downstream/assemblies/platform/assembly-platform-install-overview.adoc b/downstream/assemblies/platform/assembly-platform-install-overview.adoc index e928ba7551..1a0b7c2915 100644 --- a/downstream/assemblies/platform/assembly-platform-install-overview.adoc +++ b/downstream/assemblies/platform/assembly-platform-install-overview.adoc @@ -9,25 +9,30 @@ ifdef::context[:parent-context: {context}] [role="_abstract"] -The {PlatformName} installation program offers you flexibility, allowing you to install {PlatformNameShort} by using a number of supported installation scenarios. Starting with {PlatformNameShort} {PlatformVers}, the installation scenarios include the optional deployment of {EDAcontroller}, which introduces the automated resolution of IT requests. +The {PlatformName} installation program offers you flexibility, allowing you to install {PlatformNameShort} by using several supported installation scenarios. Regardless of the installation scenario you choose, installing {PlatformNameShort} involves the following steps: xref:proc-editing-installer-inventory-file_platform-install-scenario[Editing the {PlatformName} installer inventory file]:: The {PlatformNameShort} installer inventory file allows you to specify your installation scenario and describe host deployments to Ansible. The examples provided in this document show the parameter specifications needed to install that scenario for your deployment. -xref:proc-running-setup-script_platform-install-scenario[Running the {PlatformName} installer setup script]:: The setup script installs your private automation hub by using the required parameters defined in the inventory file. +xref:proc-running-setup-script_platform-install-scenario[Running the {PlatformName} installer setup script]:: The setup script installs {PlatformNameShort} by using the required parameters defined in the inventory file. 
-xref:proc-verify-controller-installation_platform-install-scenario[Verifying {ControllerName} installation]:: After installing {PlatformNameShort}, you can verify that the installation has been successful by logging in to the {ControllerName}.
+xref:proc-verify-aap-installation_platform-install-scenario[Verifying your {PlatformNameShort} installation]:: After installing {PlatformNameShort}, you can verify that the installation has been successful by logging in to the platform UI and seeing the relevant functionality.
 
-xref:proc-verify-hub-installation_platform-install-scenario[Verifying {HubName} installation]:: After installing {PlatformNameShort}, you can verify that the installation has been successful by logging in to the {HubName}.
+// Removing to consolidate AAP installation verification - you verify by logging into the gateway rather than logging into each component's UI - AAP-17771
+// xref:proc-verify-controller-installation_platform-install-scenario[Verifying {ControllerName} installation]:: After installing {PlatformNameShort}, you can verify that the installation has been successful by logging in to the {ControllerName}.
 
-xref:proc-verify-eda-controller-installation_platform-install-scenario[Verifying {EDAcontroller} installation]:: After installing {PlatformNameShort}, you can verify that the installation has been successful by logging in to the {EDAcontroller}.
+// xref:proc-verify-hub-installation_platform-install-scenario[Verifying {HubName} installation]:: After installing {PlatformNameShort}, you can verify that the installation has been successful by logging in to the {HubName}.
+
+// xref:proc-verify-eda-controller-installation_platform-install-scenario[Verifying {EDAcontroller} installation]:: After installing {PlatformNameShort}, you can verify that the installation has been successful by logging in to the {EDAcontroller}.
 
 //xref:assembly-platform-whats-next_platform-install-scenario[Post-installation steps]:: After successful installation, you can begin using the features of {PlatformNameShort}.
 
 [role="_additional-resources"]
 .Additional resources
-For more information about the supported installation scenarios, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/index[{PlatformName} Planning Guide].
+
+* For more information about the supported installation scenarios, see {LinkPlanningGuide}.
+* For more information about available topologies, see {LinkTopologies}.
 
 include::platform/con-aap-installation-prereqs.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-platform-install-scenario.adoc b/downstream/assemblies/platform/assembly-platform-install-scenario.adoc
index 8298cc4c84..de0ec2199d 100644
--- a/downstream/assemblies/platform/assembly-platform-install-scenario.adoc
+++ b/downstream/assemblies/platform/assembly-platform-install-scenario.adoc
@@ -10,57 +10,85 @@ ifdef::context[:parent-context: {context}]
 
 :context: platform-install-scenario
 
 [role="_abstract"]
-{PlatformNameShort} is a modular platform. You can deploy {ControllerName} with other automation platform components, such as {HubName} and {EDAcontroller}. For more information about the components provided with {PlatformNameShort}, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/planning-installation#ref-platform-components[{PlatformName} components] in the {PlatformName} Planning Guide.
+{PlatformNameShort} is a modular platform. The {Gateway} deploys automation platform components, such as {ControllerName}, {HubName}, and {EDAcontroller}. -There are several supported installation scenarios for {PlatformName}. To install {PlatformName}, you must edit the inventory file parameters to specify your installation scenario. You can use one of the following as a basis for your own inventory file: +For more information about the components provided with {PlatformNameShort}, see link:{URLPlanningGuide}/ref-aap-components[{PlatformName} components] in {TitlePlanningGuide}. + +There are several supported installation scenarios for {PlatformName}. To install {PlatformName}, you must edit the inventory file parameters to specify your installation scenario. You can use the link:{URLTopologies}/rpm-topologies#example_enterprise_inventory_file[enterprise installer] as a basis for your own inventory file. + +// New install scenarios including platform gateway AAP-17771 +//* xref:ref-gateway-controller-ext-db[Single platform gateway and {ControllerName} with an external (installer managed) database] +//* xref:ref-gateway-controller-hub-ext-db[Single platform gateway, {ControllerName}, and {HubName} with an external (installer managed) database] +//* xref:ref-gateway-controller-hub-eda-ext-db[Single platform gateway, {ControllerName}, {HubName}, and {EDAcontroller} node with an external (installer managed) database] + +.Additional resources +For a comprehensive list of pre-defined variables used in Ansible installation inventory files, see xref:ref-ansible-inventory-variables[Ansible variables]. + +// Removed for install scenario consolidation AAP-17771 +// * xref:ref-single-controller-ext-installer-managed-db[Single {ControllerName} with external (installer managed) database] +// * xref:ref-single-controller-hub-ext-database-inventory[Single {ControllerName} and single {HubName} with external (installer managed) database] +// * xref:ref-single-controller-hub-eda-with-managed-db[Single {ControllerName}, single {HubName}, and single event-driven ansible controller node with external (installer managed ) database] //[ifowler] Removed for AAP-18700 Install Guide Scenario Consolidation -//* xref:ref-standlone-platform-inventory_platform-install-scenario[Standalone automation controller with external (installer managed) database] -* xref:ref-single-controller-ext-installer-managed-db[Single {ControllerName} with external (installer managed) database] -//[ifowler] Removed for AAP-18700 Install Guide Scenario Consolidation +//* xref:ref-standlone-platform-inventory_platform-install-scenario[Standalone automation controller with external (installer managed) database] //* xref:ref-single-controller-ext-customer-managed-db_platform-install-scenario[Single {ControllerName} with external (customer provided) database] //* xref:ref-standlone-platform-ext-database-inventory_platform-install-scenario[{PlatformNameShort} with an external (installer managed) database] //* xref:ref-example-platform-ext-database-customer-provided_platform-install-scenario[{PlatformNameShort} with an external (customer provided) database] //* xref:ref-single-eda-controller-with-internal-db_platform-install-scenario[Single {EDAcontroller} node with internal database] //* xref:ref-standlone-hub-inventory_platform-install-scenario[Standalone {HubName} with internal database] -* xref:ref-single-controller-hub-ext-database-inventory[Single {ControllerName} and single {HubName} with external (installer managed) database] -//[ifowler] 
Removed for AAP-18700 Install Guide Scenario Consolidation //* xref:ref-standalone-hub-ext-database-customer-provided_platform-install-scenario[Single {HubName} with external (customer provided) database] // xref:ref-ldap-config-on-pah_platform-install-scenario[LDAP configuration on {PrivateHubName}] -* xref:ref-single-controller-hub-eda-with-managed-db[Single {ControllerName}, single {HubName}, and single event-driven ansible controller node with external (installer managed ) database] - include::platform/proc-editing-inventory-file.adoc[leveloffset=+1] include::platform/con-install-scenario-examples.adoc[leveloffset=+1] include::platform/con-install-scenario-recommendations.adoc[leveloffset=+2] +//Added for AAP-29120 +include::platform/ref-gateway-controller-ext-db.adoc[leveloffset=+3] +include::platform/ref-gateway-controller-hub-ext-db.adoc[leveloffset=+3] +include::platform/ref-gateway-controller-hub-eda-ext-db.adoc[leveloffset=+3] +include::platform/con-ha-hub-installation.adoc[leveloffset=+3] +include::platform/proc-install-ha-hub-selinux.adoc[leveloffset=+3] +include::platform/proc-configure-pulpcore-service.adoc[leveloffset=+4] +include::platform/proc-apply-selinux-context.adoc[leveloffset=+4] +include::hub/hub/proc-configure-content-signing-on-pah.adoc[leveloffset=+3] +include::platform/proc-add-eda-safe-plugin-var.adoc[leveloffset=+3] + +include::platform/proc-set-registry-username-password.adoc[leveloffset=+2] +//[emcwhinn] Removing for AAP-29246 as content is being moved to one guide in 2.4 customer portal +//include::platform/con-eda-2-5-with-controller-2-4.adoc[leveloffset=+3] //[ifowler] Removed for AAP-18700 Install Guide Scenario Consolidation //include::platform/ref-platform-non-inst-database-inventory.adoc[leveloffset=+3] -include::platform/ref-single-controller-ext-installer-managed-db.adoc[leveloffset=+3] -//[ifowler] Removed for AAP-18700 Install Guide Scenario Consolidation //include::platform/ref-single-controller-ext-customer-managed-db.adoc[leveloffset=+3] //include::platform/ref-example-platform-ext-database-inventory.adoc[leveloffset=+3] //include::platform/ref-example-platform-ext-database-customer-provided.adoc[leveloffset=+3] //include::platform/ref-single-eda-controller-with-internal-db.adoc[leveloffset=+3] //include::platform/ref-standalone-hub-inventory.adoc[leveloffset=+3] -include::platform/ref-standalone-controller-hub-ext-database-inventory.adoc[leveloffset=+3] -include::platform/ref-connect-hub-to-rhsso.adoc[leveloffset=+4] -include::platform/con-ha-hub-installation.adoc[leveloffset=+4] -include::platform/proc-install-ha-hub-selinux.adoc[leveloffset=+4] -include::platform/proc-configure-pulpcore-service.adoc[leveloffset=+4] -include::platform/proc-apply-selinux-context.adoc[leveloffset=+4] -include::hub/hub/proc-configure-content-signing-on-pah.adoc[leveloffset=+3] -include::platform/ref-ldap-config-on-pah.adoc[leveloffset=+3] -include::platform/ref-ldap-referrals.adoc[leveloffset=+3] -include::platform/ref-single-controller-hub-eda-with-managed-db.adoc[leveloffset=+3] +// include::platform/ref-standalone-controller-hub-ext-database-inventory.adoc[leveloffset=+3] +//[rjgrange] Removed for AAP-22613 Removing all references to SSO and LDAP installation +//include::platform/ref-connect-hub-to-rhsso.adoc[leveloffset=+4] + + +//[rjgrange] Removed for AAP-22613 Removing all references to SSO and LDAP installation +//include::platform/ref-ldap-config-on-pah.adoc[leveloffset=+3] +//include::platform/ref-ldap-referrals.adoc[leveloffset=+3] +// 
include::platform/ref-single-controller-hub-eda-with-managed-db.adoc[leveloffset=+3] //[ifowler] Removed for AAP-18700 Install Guide Scenario Consolidation //include::platform/ref-standalone-hub-ext-database-customer-provided.adoc[leveloffset=+3] // dcdacosta - removed this assembly because the modules are included above. include::assembly-installing-high-availability-hub.adoc[leveloffset=+3] + + +include::platform/ref-redis-config-enterprise-topology.adoc[leveloffset=+3] include::platform/proc-running-setup-script.adoc[leveloffset=+1] -include::platform/proc-verify-controller-installation.adoc[leveloffset=+1] -include::platform/ref-controller-configs.adoc[leveloffset=+2] -include::platform/proc-verify-hub-installation.adoc[leveloffset=+1] -include::platform/ref-hub-configs.adoc[leveloffset=+2] -include::platform/proc-verify-eda-controller-installation.adoc[leveloffset=+1] +include::platform/proc-verify-aap-installation.adoc[leveloffset=+1] +include::platform/con-adding-subscription-manifest.adoc[leveloffset=+1] + +// Removing to consolidate AAP installation verification - you verify by logging into the gateway rather than logging into each component's UI - AAP-17771 +// include::platform/proc-verify-controller-installation.adoc[leveloffset=+1] +// include::platform/ref-controller-configs.adoc[leveloffset=+2] +// include::platform/proc-verify-hub-installation.adoc[leveloffset=+1] +// include::platform/ref-hub-configs.adoc[leveloffset=+2] +// include::platform/proc-verify-eda-controller-installation.adoc[leveloffset=+1] + //[ifowler] Removed for AAP-18700 Install Guide Scenario Consolidation moved to Operations Guide //include::assembly-platform-whats-next.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-platform-whats-next.adoc b/downstream/assemblies/platform/assembly-platform-whats-next.adoc index 99336b92ba..f9bc69b01c 100644 --- a/downstream/assemblies/platform/assembly-platform-whats-next.adoc +++ b/downstream/assemblies/platform/assembly-platform-whats-next.adoc @@ -9,7 +9,7 @@ Whether you are a new {PlatformNameShort} user looking to start automating, or a //isolated node migration //playbooks to download - -include::assembly-migrate-platform.adoc[leveloffset=+1] +//[ddacosta]Migration not part of 2.5EA so removing this section until a migration path is made available. The content will need to be reworked for changes to migration/upgrade. +// include::assembly-migrate-platform.adoc[leveloffset=+1] include::platform/proc-update-ee-image-locations.adoc[leveloffset=+1] include::platform/con-why-automation-mesh.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-setting-up-automation-mesh.adoc b/downstream/assemblies/platform/assembly-setting-up-automation-mesh.adoc index f73e98cff7..97b133106f 100644 --- a/downstream/assemblies/platform/assembly-setting-up-automation-mesh.adoc +++ b/downstream/assemblies/platform/assembly-setting-up-automation-mesh.adoc @@ -13,13 +13,15 @@ ifdef::context[:parent-context: {context}] Configure the {PlatformNameShort} installer to set up {AutomationMesh} for your Ansible environment. Perform additional tasks to customize your installation, such as importing a Certificate Authority (CA) certificate. 
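+
+As a rough sketch of what the mesh-related portion of an installer inventory can look like (shown in YAML inventory form), the group and variable names, such as `execution_nodes` and `node_type`, are assumptions to verify against the {AutomationMesh} documentation:
+
+[source,yaml]
+----
+all:
+  children:
+    automationcontroller:
+      hosts:
+        controller.example.com:
+          node_type: hybrid      # assumed variable; runs control and execution work
+    execution_nodes:
+      hosts:
+        exec1.example.com:
+          node_type: execution   # assumed variable; runs automation jobs only
+----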
include::platform/con-install-mesh.adoc[leveloffset=+1]
+include::platform/proc-editing-inventory-file.adoc[leveloffset=+1]
+include::platform/proc-running-setup-script.adoc[leveloffset=+1]
 include::platform/proc-import-mesh-ca.adoc[leveloffset=+1]
 
 [role="_additional-resources"]
 .Additional resources
 
-* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/platform-system-requirements[{PlatformName} System Requirements]
+* link:{URLPlanningGuide}/platform-system-requirements[System Requirements]
 
 ifdef::parent-context[:context: {parent-context}]
 ifndef::parent-context[:!context:]
diff --git a/downstream/assemblies/platform/assembly-system-requirements.adoc b/downstream/assemblies/platform/assembly-system-requirements.adoc
index ee275df358..8d282f2db8 100644
--- a/downstream/assemblies/platform/assembly-system-requirements.adoc
+++ b/downstream/assemblies/platform/assembly-system-requirements.adoc
@@ -9,11 +9,24 @@ Use this information when planning your {PlatformName} installations and designi
 
 .Prerequisites
 
-* You can obtain root access either through the `sudo` command, or through privilege escalation. For more on privilege escalation see link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_privilege_escalation.html[Understanding privilege escalation].
+* You can obtain root access either through the `sudo` command or through privilege escalation. For more information about privilege escalation, see link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_privilege_escalation.html[Understanding privilege escalation].
 
 * You can de-escalate privileges from root to users such as AWX, PostgreSQL, {EDAName}, or Pulp.
 
-* You have configured an NTP client on all nodes. For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_upgrade_and_migration_guide/migrate-isolated-execution-nodes#automation_controller_configuration_requirements[Configuring NTP server using Chrony].
+* You have configured an NTP client on all nodes.
+// emurtough commented out link to upgrade and migration guide - to be replaced once the guide is published
+// For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_upgrade_and_migration_guide/migrate-isolated-execution-nodes#automation_controller_configuration_requirements[Configuring NTP server using Chrony].
+// emurtough commented out files to address duplication across 2.5 doc set 9/18/2024 +// ddacosta added conditional tags to share content between install guide and planning guide + +ifdef::aap-plan[] +include::platform/ref-RPM-system-requirements.adoc[leveloffset=+1] +include::platform/ref-containerized-system-requirements.adoc[leveloffset=+1] +include::platform/ref-OCP-system-requirements.adoc[leveloffset=+1] +endif::aap-plan[] + +ifdef::aap-install[] include::platform/ref-system-requirements.adoc[leveloffset=+1] +include::platform/ref-gateway-system-requirements.adoc[leveloffset=+1] include::platform/ref-controller-system-requirements.adoc[leveloffset=+1] include::platform/ref-automation-hub-requirements.adoc[leveloffset=+1] include::platform/ref-ha-hub-reqs.adoc[leveloffset=+2] @@ -22,6 +35,7 @@ include::platform/ref-postgresql-requirements.adoc[leveloffset=+1] include::platform/proc-setup-postgresql-ext-database.adoc[leveloffset=+2] include::platform/proc-enable-hstore-extension.adoc[leveloffset=+2] include::platform/proc-benchmark-postgresql.adoc[leveloffset=+2] +endif::aap-install[] ifdef::parent-context[:context: {parent-context}] ifndef::parent-context[:!context:] diff --git a/downstream/assemblies/platform/assembly-ug-controller-instance-groups.adoc b/downstream/assemblies/platform/assembly-ug-controller-instance-groups.adoc index 323ad42826..387a60df5c 100644 --- a/downstream/assemblies/platform/assembly-ug-controller-instance-groups.adoc +++ b/downstream/assemblies/platform/assembly-ug-controller-instance-groups.adoc @@ -10,8 +10,8 @@ image::ug-instance-groups_list_view.png[Instance groups list view] .Additional resources -* For more information about the policy or rules associated with instance groups, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-instance-and-container-groups#controller-instance-groups[Instance Groups] section of the _{ControllerAG}_. -* For more information on connecting your instance group to a container, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-instance-and-container-groups#controller-container-groups[Container Groups]. +* For more information about the policy or rules associated with instance groups, see the xref:con-controller-instance-groups[Instance groups] section of the _{ControllerAG}_. +* For more information about connecting your instance group to a container, see xref:controller-container-groups[Container groups]. include::platform/proc-controller-create-instance-group.adoc[leveloffset=+1] include::platform/proc-controller-associate-instances-to-instance-group.adoc[leveloffset=+2] diff --git a/downstream/assemblies/platform/assembly-ug-controller-job-templates.adoc b/downstream/assemblies/platform/assembly-ug-controller-job-templates.adoc index 37af94ba2a..13e265c1de 100644 --- a/downstream/assemblies/platform/assembly-ug-controller-job-templates.adoc +++ b/downstream/assemblies/platform/assembly-ug-controller-job-templates.adoc @@ -2,18 +2,24 @@ = Job templates +You can create both Job templates and Workflow job templates from {MenuAETemplates}. + +For Workflow job templates, see xref:controller-workflow-job-templates[Workflow job templates]. + A job template is a definition and set of parameters for running an Ansible job. Job templates are useful to run the same job many times. They also encourage the reuse of Ansible Playbook content and collaboration between teams. 
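+
+Because a job template is just a named set of parameters, it can also be captured as code. The following sketch uses the `ansible.controller.job_template` module, assuming that collection is installed and that the named organization, project, inventory, and playbook already exist:
+
+[source,yaml]
+----
+- name: Ensure a reusable job template exists
+  ansible.controller.job_template:
+    name: Deploy web tier    # hypothetical template name
+    organization: Default
+    project: Web Project     # hypothetical existing project
+    playbook: deploy.yml     # playbook within that project
+    inventory: Production    # hypothetical existing inventory
+    state: present
+----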
-The *Templates* list view shows job templates that are currently available.
+The *Templates* page shows both job templates and workflow job templates that are currently available.
 The default view is collapsed (Compact), showing the template name, template type, and the timestamp of the last job that ran using that template.
 You can click the arrow image:arrow.png[Arrow,15,15] icon next to each entry to expand and view more information.
 This list is sorted alphabetically by name, but you can sort by other criteria, or search by various fields and attributes of a template.
 
 //image::ug-job-templates-home.png[Job templates home]
 
-From this screen you can launch image:rightrocket.png[Rightrocket,15,15], edit image:leftpencil.png[Leftpencil,15,15], copy image:copy.png[Copy,15,15] and delete image:delete-button.png[Delete,15.15] a job template.
+From this screen you can launch image:rightrocket.png[Rightrocket,15,15], edit image:leftpencil.png[Leftpencil,15,15], copy image:copy.png[Copy,15,15], and delete image:delete-button.png[Delete,15,15] a job template.
+
+Workflow templates have the workflow visualizer image:visualizer.png[Workflow visualizer,15,15] icon as a shortcut for accessing the workflow editor.
 
 [NOTE]
 ====
diff --git a/downstream/assemblies/platform/assembly-ug-controller-jobs.adoc b/downstream/assemblies/platform/assembly-ug-controller-jobs.adoc
index acd3c097d7..48cfc40513 100644
--- a/downstream/assemblies/platform/assembly-ug-controller-jobs.adoc
+++ b/downstream/assemblies/platform/assembly-ug-controller-jobs.adoc
@@ -2,7 +2,7 @@
 = Jobs in {ControllerName}
 
-A job is an instance of {ControllerName} launching an Ansible playbook against an inventory of hosts.
+A job is an instance of {ControllerName} launching an Ansible Playbook against an inventory of hosts.
 
 The *Jobs* list view displays a list of jobs and their statuses, shown as completed successfully, failed, or as an active (running) job.
 
 The default view is collapsed (Compact) with the job name, status, job type, start, and finish times.
@@ -27,7 +27,7 @@ image::ug-job-details-view-filters.png[Job details view filters]
 * The *Event* option in the *Search output* list enables you to filter by the events of interest, such as errors, host failures, host retries, and items skipped. You can include as many events in the filter as necessary.
 
 //* The *Advanced* option is a refined search that gives you a combination of including or excluding criteria, searching by key, or by lookup type.
-For more information on using the search, refer to the xref:assembly-controller-search[Search] section.
+For more information about using the search, see the xref:assembly-controller-search[Search] section.
 
 include::platform/con-controller-inventory-sync-jobs.adoc[leveloffset=+1]
 include::platform/ref-controller-inventory-sync-details.adoc[leveloffset=+2]
 include::platform/con-controller-scm-inventory-jobs.adoc[leveloffset=+1]
 include::platform/ref-controller-scm-inventory-details.adoc[leveloffset=+2]
 include::platform/con-controller-playbook-run-jobs.adoc[leveloffset=+1]
 include::platform/ref-controller-playbook-run-search.adoc[leveloffset=+2]
-include::platform/ref-controller-host-details.adoc[leveloffset=+2]
+//Commenting this out until I know what it's talking about.
+//include::platform/ref-controller-host-details.adoc[leveloffset=+2]
 include::platform/ref-controller-playbook-run-details.adoc[leveloffset=+2]
+include::platform/con-controller-playbook-access-info-sharing.adoc[leveloffset=+2]
+include::platform/ref-controller-isolation-functionality.adoc[leveloffset=+2]
 include::platform/con-controller-capacity-determination.adoc[leveloffset=+1]
 include::platform/con-controller-resource-determination-capacity.adoc[leveloffset=+2]
 include::platform/ref-controller-memory-relative-capacity.adoc[leveloffset=+3]
diff --git a/downstream/assemblies/platform/assembly-ug-controller-notifications.adoc b/downstream/assemblies/platform/assembly-ug-controller-notifications.adoc
index 3704f9b7f9..2882483963 100644
--- a/downstream/assemblies/platform/assembly-ug-controller-notifications.adoc
+++ b/downstream/assemblies/platform/assembly-ug-controller-notifications.adoc
@@ -1,6 +1,6 @@
 [id="controller-notifications"]
 
-= Notifications
+= Notifiers
 
 A xref:controller-notification-types[Notification type], such as Email, Slack, or a webhook, is an instance of a notification template, and has a name, description, and configuration defined in the notification template.
diff --git a/downstream/assemblies/platform/assembly-ug-controller-schedules.adoc b/downstream/assemblies/platform/assembly-ug-controller-schedules.adoc
index 62da9665f2..e765c62f4d 100644
--- a/downstream/assemblies/platform/assembly-ug-controller-schedules.adoc
+++ b/downstream/assemblies/platform/assembly-ug-controller-schedules.adoc
@@ -6,7 +6,8 @@ From the navigation panel, click {MenuAESchedules} to access your configured sch
 
 The schedules list can be sorted by any of the attributes from each column using the directional arrows. You can also search by name, date, or the name of the month in which a schedule runs.
 
-Each schedule has options to enable or disable that schedule using the *On* or *Off* toggle next to the schedule name.
+Use the *On* or *Off* toggle to stop an active schedule or activate a stopped schedule.
+
 Click the Edit image:leftpencil.png[Edit,15,15] icon to edit a schedule.
 
 image::ug-schedules-sample-list.png[Schedules sample list]
diff --git a/downstream/assemblies/platform/assembly-ug-controller-work-with-webhooks.adoc b/downstream/assemblies/platform/assembly-ug-controller-work-with-webhooks.adoc
index 8eae1af8d3..c771547ade 100644
--- a/downstream/assemblies/platform/assembly-ug-controller-work-with-webhooks.adoc
+++ b/downstream/assemblies/platform/assembly-ug-controller-work-with-webhooks.adoc
@@ -9,7 +9,7 @@ Set up a webhook using the following services:
 
 * xref:controller-set-up-github-webhook[Setting up a GitHub webhook]
 * xref:controller-set-up-gitlab-webhook[Setting up a GitLab webhook]
-* xref:controller-view-payload-output[Viewing a payload output]
+* xref:controller-view-payload-output[Viewing the payload output]
 
 The webhook post-status-back functionality for GitHub and GitLab is designed to work only under certain CI events.
Receiving another kind of event results in messages such as the following in the service log: diff --git a/downstream/assemblies/platform/assembly-ug-controller-workflow-job-templates.adoc b/downstream/assemblies/platform/assembly-ug-controller-workflow-job-templates.adoc index 0c989b0b38..4cad1743da 100644 --- a/downstream/assemblies/platform/assembly-ug-controller-workflow-job-templates.adoc +++ b/downstream/assemblies/platform/assembly-ug-controller-workflow-job-templates.adoc @@ -2,6 +2,10 @@ = Workflow job templates +You can create both job templates and workflow job templates from {MenuAETemplates}. + +For job templates, see xref:controller-job-templates[Job templates]. + A workflow job template links together a sequence of disparate resources that tracks the full set of jobs that were part of the release process as a single unit. These resources include the following: @@ -10,7 +14,7 @@ These resources include the following: * Project syncs * Inventory source syncs -The *Templates* list view shows the workflow and job templates that are currently available. +The *Templates* page shows the workflow and job templates that are currently available. The default view is collapsed (Compact), showing the template name, template type, and the statuses of the jobs that have run by using that template. You can click the arrow next to each entry to expand and view more information. This list is sorted alphabetically by name, but you can sort by other criteria, or search by various fields and attributes of a template. @@ -25,7 +29,7 @@ image::ug-wf-templates-home.png[Workflow templates home] Workflow templates can be used as building blocks for another workflow template. You can enable *Prompt on Launch* by setting up several settings in a workflow template, which you can edit at the workflow job template level. These do not affect the values assigned at the individual workflow template level. -For further instructions, see the xref:controller-workflow-visualizer[Workflow Visualizer] section. +For further instructions, see the xref:controller-workflow-visualizer[Workflow visualizer] section. ==== include::platform/proc-controller-create-workflow-template.adoc[leveloffset=+1] diff --git a/downstream/assemblies/platform/assembly-update-container.adoc b/downstream/assemblies/platform/assembly-update-container.adoc new file mode 100644 index 0000000000..89bcefe5c5 --- /dev/null +++ b/downstream/assemblies/platform/assembly-update-container.adoc @@ -0,0 +1,9 @@ +[id="update-container"] + += Container-based {PlatformNameShort} + + +To update your container-based {PlatformNameShort}, start by reviewing the update considerations. You can then download the latest version of the {PlatformNameShort} installer, configure the `inventory` file in the installation bundle to reflect your environment, and then run the installer. + +include::platform/proc-update-aap-container.adoc[leveloffset=+1] +include::platform/proc-backup-aap-container.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/assemblies/platform/assembly-update-ocp.adoc b/downstream/assemblies/platform/assembly-update-ocp.adoc new file mode 100644 index 0000000000..cd6c283687 --- /dev/null +++ b/downstream/assemblies/platform/assembly-update-ocp.adoc @@ -0,0 +1,7 @@ +[id="update-ocp"] + += Updating {PlatformNameShort} on {OCPShort} + +You can use an upgrade patch to update your operator-based {PlatformNameShort}.
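To make the operator update flow concrete, the following is a minimal sketch of switching an existing {OperatorPlatformNameShort} subscription to a newer release channel with `oc`. The `aap` namespace, the `aap-operator` subscription name, and the `stable-2.5` channel are assumptions for illustration; substitute the values from your own cluster.

[source,bash]
----
# List the operator subscriptions in the installation namespace
# ("aap" is a placeholder namespace).
oc get subscriptions.operators.coreos.com -n aap

# Point the subscription at the desired release channel
# (subscription and channel names are placeholders).
oc patch subscriptions.operators.coreos.com aap-operator -n aap \
  --type merge -p '{"spec":{"channel":"stable-2.5"}}'

# Watch the new ClusterServiceVersion roll out.
oc get csv -n aap -w
----

Whether the resulting install plan applies automatically or waits for approval depends on the subscription's `installPlanApproval` setting.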
+ +include::platform/proc-update-aap-on-ocp.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/assemblies/platform/assembly-update-rpm.adoc b/downstream/assemblies/platform/assembly-update-rpm.adoc new file mode 100644 index 0000000000..e3447dfdbc --- /dev/null +++ b/downstream/assemblies/platform/assembly-update-rpm.adoc @@ -0,0 +1,11 @@ +[id="update-rpm"] + += RPM-based {PlatformNameShort} + +To update your RPM-based {PlatformNameShort}, start by reviewing the update considerations. You can then download the latest version of the {PlatformNameShort} installer, configure the `inventory` file in the installation bundle to reflect your environment, and then run the installer. + +include::platform/con-update-planning.adoc[leveloffset=+1] +include::assembly-choosing-obtaining-installer.adoc[leveloffset=+1] +include::platform/proc-backup-aap-rpm.adoc[leveloffset=+1] +include::platform/proc-inventory-file-setup-rpm.adoc[leveloffset=+1] +include::platform/proc-running-setup-script-for-updates.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/assemblies/platform/assembly-using-rhsso-operator-with-automation-hub.adoc b/downstream/assemblies/platform/assembly-using-rhsso-operator-with-automation-hub.adoc index 4052f59442..e3cc82310f 100644 --- a/downstream/assemblies/platform/assembly-using-rhsso-operator-with-automation-hub.adoc +++ b/downstream/assemblies/platform/assembly-using-rhsso-operator-with-automation-hub.adoc @@ -9,7 +9,7 @@ ifdef::context[:parent-context: {context}] {PrivateHubNameStart} uses {RHSSO} for authentication. The {OperatorRHSSO} creates and manages resources. -Use this Operator to create custom resources to automate {RHSSO} administration in Openshift. +Use this operator to create custom resources to automate {RHSSO} administration in OpenShift. * When installing {PlatformNameShort} on _Virtual Machines_ (VMs) the installer can automatically install and configure {RHSSO} for use with {PrivateHubName}. @@ -28,7 +28,8 @@ include::platform/proc-create-keycloak-instance.adoc[leveloffset=2] include::platform/proc-create-keycloak-realm.adoc[leveloffset=2] include::platform/proc-create-keycloak-client.adoc[leveloffset=2] include::platform/proc-create-a-user.adoc[leveloffset=2] -include::platform/proc-installing-the-ansible-platform-operator.adoc[leveloffset=2] +//[gmurray] commenting out for now as we're trying to encourage users to install all components via platform gateway. 
+//include::platform/proc-installing-the-ansible-platform-operator.adoc[leveloffset=1] include::platform/proc-creating-a-secret.adoc[leveloffset=2] include::platform/proc-installing-hub-using-operator.adoc[leveloffset=2] include::platform/proc-determine-hub-route.adoc[leveloffset=2] diff --git a/downstream/assemblies/platform/eda b/downstream/assemblies/platform/eda new file mode 120000 index 0000000000..cca4c84ae0 --- /dev/null +++ b/downstream/assemblies/platform/eda @@ -0,0 +1 @@ +../../modules/eda \ No newline at end of file diff --git a/downstream/assemblies/playbooks/assembly-open-source-license.adoc b/downstream/assemblies/playbooks/assembly-open-source-license.adoc new file mode 100644 index 0000000000..ae4033b19c --- /dev/null +++ b/downstream/assemblies/playbooks/assembly-open-source-license.adoc @@ -0,0 +1,5 @@ +[id="assembly-open-source-license"] + += Open source license + +include::../aap-common/gplv3-license-text.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/assemblies/topologies/assembly-appendix-topology-resources.adoc b/downstream/assemblies/topologies/assembly-appendix-topology-resources.adoc new file mode 100644 index 0000000000..fc7542e791 --- /dev/null +++ b/downstream/assemblies/topologies/assembly-appendix-topology-resources.adoc @@ -0,0 +1,9 @@ +[id="appendix-topology-resources"] += Additional resources for tested deployment models + +This appendix provides a reference for the additional resources relevant to the tested deployment models outlined in _{TitleTopologies}_. + +* For additional information about each of the tested topologies described in this document, see the link:https://github.com/ansible/test-topologies/[test-topologies GitHub repository]. + +* For questions about IBM Cloud-specific configurations or issues, see link:https://www.ibm.com/mysupport[IBM support]. + diff --git a/downstream/assemblies/topologies/assembly-container-topologies.adoc b/downstream/assemblies/topologies/assembly-container-topologies.adoc new file mode 100644 index 0000000000..ddefcaac5c --- /dev/null +++ b/downstream/assemblies/topologies/assembly-container-topologies.adoc @@ -0,0 +1,11 @@ +[id="container-topologies"] + += Container topologies + +The containerized installer deploys {PlatformNameShort} on {RHEL} by using Podman, which runs the platform in containers on host machines. Customers manage the product and infrastructure lifecycle. + +//Container growth topology +include::topologies/ref-cont-a-env-a.adoc[leveloffset=+1] + +//Container enterprise topology +include::topologies/ref-cont-b-env-a.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/assemblies/topologies/assembly-ocp-topologies.adoc b/downstream/assemblies/topologies/assembly-ocp-topologies.adoc new file mode 100644 index 0000000000..7e82863dcc --- /dev/null +++ b/downstream/assemblies/topologies/assembly-ocp-topologies.adoc @@ -0,0 +1,16 @@ +[id="ocp-topologies"] + += Operator topologies + +The {OperatorPlatformNameShort} uses Red Hat OpenShift Operators to deploy {PlatformNameShort} within Red Hat OpenShift. Customers manage the product and infrastructure lifecycle. + +[IMPORTANT] +==== +You can only install a single instance of the {OperatorPlatformNameShort} into a single namespace. +Installing multiple instances in the same namespace can lead to improper operation for both Operator instances.
+==== + +//OCP growth topology +include::topologies/ref-ocp-a-env-a.adoc[leveloffset=+1] +//OCP enterprise topology +include::topologies/ref-ocp-b-env-a.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/assemblies/topologies/assembly-overview-tested-deployment-models.adoc b/downstream/assemblies/topologies/assembly-overview-tested-deployment-models.adoc new file mode 100644 index 0000000000..144955f0e9 --- /dev/null +++ b/downstream/assemblies/topologies/assembly-overview-tested-deployment-models.adoc @@ -0,0 +1,38 @@ +[id="overview-tested-deployment-models"] + += Overview of tested deployment models + +Red Hat tests {PlatformNameShort} {PlatformVers} with a defined set of topologies to give you opinionated deployment options. Deploy all components of {PlatformNameShort} so that all features and capabilities are available for use without the need to take further action. + +Red Hat tests the installation of {PlatformNameShort} {PlatformVers} based on a defined set of infrastructure topologies or reference architectures. Enterprise organizations can use one of the enterprise topologies for production deployments to ensure the highest level of uptime, performance, and continued scalability. Organizations or deployments that are resource-constrained can use a "growth" topology. + +It is possible to install {PlatformNameShort} on different infrastructure topologies and with different environment configurations. Red Hat does not fully test topologies outside of published reference architectures. Use a tested topology for all new deployments. + +== Installation and deployment models + +The following table outlines the different ways to install or deploy {PlatformNameShort}: + +.{PlatformNameShort} installation and deployment models +[options="header"] +|==== +| Mode | Infrastructure | Description | Tested topologies +| RPM | Virtual machines and bare metal | The RPM installer deploys {PlatformNameShort} on {RHEL} by using RPMs to install the platform on host machines. Customers manage the product and infrastructure lifecycle. +a| +* link:{URLTopologies}/rpm-topologies#rpm-a-env-a[RPM growth topology] +* link:{URLTopologies}/rpm-topologies#rpm-a-env-b[RPM mixed growth topology] +* link:{URLTopologies}/rpm-topologies#rpm-b-env-a[RPM enterprise topology] +* link:{URLTopologies}/rpm-topologies#rpm-b-env-b[RPM mixed enterprise topology] +| Containers +| Virtual machines and bare metal +| The containerized installer deploys {PlatformNameShort} on {RHEL} by using Podman, which runs the platform in containers on host machines. Customers manage the product and infrastructure lifecycle. +a| +* link:{URLTopologies}/container-topologies#cont-a-env-a[Container growth topology] +* link:{URLTopologies}/container-topologies#cont-b-env-a[Container enterprise topology] + +| Operator +| Red Hat OpenShift +| The Operator uses Red Hat OpenShift Operators to deploy {PlatformNameShort} within Red Hat OpenShift. Customers manage the product and infrastructure lifecycle.
+a| +* link:{URLTopologies}/ocp-topologies#ocp-a-env-a[Operator growth topology] +* link:{URLTopologies}/ocp-topologies#ocp-b-env-a[Operator enterprise topology] +|==== \ No newline at end of file diff --git a/downstream/assemblies/topologies/assembly-rpm-topologies.adoc b/downstream/assemblies/topologies/assembly-rpm-topologies.adoc new file mode 100644 index 0000000000..e3d0bea7c5 --- /dev/null +++ b/downstream/assemblies/topologies/assembly-rpm-topologies.adoc @@ -0,0 +1,17 @@ +[id="rpm-topologies"] + += RPM topologies + +The RPM installer deploys {PlatformNameShort} on {RHEL} by using RPMs to install the platform on host machines. Customers manage the product and infrastructure lifecycle. + +//RPM growth topology +include::topologies/ref-rpm-a-env-a.adoc[leveloffset=+1] + +//RPM mixed growth topology +include::topologies/ref-rpm-a-env-b.adoc[leveloffset=+1] + +//RPM enterprise topology +include::topologies/ref-rpm-b-env-a.adoc[leveloffset=+1] + +//RPM mixed enterprise topology +include::topologies/ref-rpm-b-env-b.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/assemblies/topologies/topologies b/downstream/assemblies/topologies/topologies new file mode 120000 index 0000000000..e20855697b --- /dev/null +++ b/downstream/assemblies/topologies/topologies @@ -0,0 +1 @@ +../../modules/topologies \ No newline at end of file diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-backup-recovery.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-backup-recovery.adoc index a35a7bfb35..0bd7617aa7 100644 --- a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-backup-recovery.adoc +++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-backup-recovery.adoc @@ -3,6 +3,6 @@ = Backup and recovery -* For information about performing a backup and recovery of {PlatformNameShort}, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-backup-and-restore#doc-wrapper[Backup and restore] in the Automation Controller Administration Guide. +* For information about performing a backup and recovery of {PlatformNameShort}, see link:{URLControllerAdminGuide}/controller-backup-and-restore[Backup and restore] in _{TitleControllerAdminGuide}_. -* For information about troubleshooting backup and recovery for installations of {OperatorPlatform} on {OCPShort}, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_operator_backup_and_recovery_guide/aap-troubleshoot-backup-recover[Troubleshooting] section in the Red{nbsp}Hat {OperatorPlatform} Backup and Recovery Guide. \ No newline at end of file +* For information about troubleshooting backup and recovery for installations of {OperatorPlatformNameShort} on {OCPShort}, see the link:{URLOperatorBackup}/aap-troubleshoot-backup-recover[Troubleshooting] section in _{TitleOperatorBackup}_. 
\ No newline at end of file diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-controller.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-controller.adoc index c9615b50d5..786946dc94 100644 --- a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-controller.adoc +++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-controller.adoc @@ -3,6 +3,6 @@ = Resources for troubleshooting {ControllerName} -* For information about troubleshooting {ControllerName}, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#controller-troubleshooting[Troubleshooting automation controller] in the Automation Controller Administration Guide. +* For information about troubleshooting {ControllerName}, see link:{URLControllerAdminGuide}/controller-troubleshooting[Troubleshooting {ControllerName}] in _{TitleControllerAdminGuide}_. -* For information about troubleshooting the performance of {ControllerName}, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/assembly-controller-improving-performance#ref-controller-performance-troubleshooting[Performance troubleshooting for automation controller] in the Automation Controller Administration Guide. \ No newline at end of file +* For information about troubleshooting the performance of {ControllerName}, see link:{URLControllerAdminGuide}/assembly-controller-improving-performance#ref-controller-performance-troubleshooting[Performance troubleshooting for {ControllerName}] in _{TitleControllerAdminGuide}_. \ No newline at end of file diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-jobs.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-jobs.adoc index 12855b2479..77aa219021 100644 --- a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-jobs.adoc +++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-jobs.adoc @@ -5,7 +5,8 @@ Troubleshoot issues with jobs. -include::troubleshooting-aap/proc-troubleshoot-job-localhost.adoc[leveloffset=+1] +// Michelle - commenting out for now as it refers to upgrade info +// include::troubleshooting-aap/proc-troubleshoot-job-localhost.adoc[leveloffset=+1] include::troubleshooting-aap/proc-troubleshoot-job-resolve-module.adoc[leveloffset=+1] include::troubleshooting-aap/proc-troubleshoot-job-timeout.adoc[leveloffset=+1] include::troubleshooting-aap/proc-troubleshoot-job-pending.adoc[leveloffset=+1] diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-playbooks.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-playbooks.adoc index 5473d6ab0f..c88e776aca 100644 --- a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-playbooks.adoc +++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-playbooks.adoc @@ -3,4 +3,7 @@ = Playbooks -You can use {Navigator} to interactively troubleshoot your playbook. For more information about troubleshooting a playbook with {Navigator}, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_content_navigator_creator_guide/assembly-troubleshooting-navigator_ansible-navigator[Troubleshooting Ansible content with {Navigator}] in the Automation Content Navigator Creator Guide. +You can use {Navigator} to interactively troubleshoot your playbook. 
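For example, a minimal sketch of an interactive troubleshooting session, assuming a playbook named `site.yml` and an inventory file in the working directory (both hypothetical names):

[source,bash]
----
# Step through the run interactively and drill into failed tasks.
ansible-navigator run site.yml -i inventory --mode interactive

# Or capture plain text output for sharing in a support case.
ansible-navigator run site.yml -i inventory --mode stdout

# Replay a previously recorded run from its playbook artifact.
ansible-navigator replay site-artifact.json
----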
+For more information about troubleshooting a playbook with {Navigator}, see +link:{URLNavigatorGuide}/assembly-troubleshooting-navigator_ansible-navigator[Troubleshooting Ansible content with {Navigator}] +in _{TitleNavigatorGuide}_. diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-upgrade.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-upgrade.adoc new file mode 100644 index 0000000000..3a12be1aa0 --- /dev/null +++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-upgrade.adoc @@ -0,0 +1,8 @@ + +[id="troubleshoot-upgrade"] + += Upgrading + +Troubleshoot issues when upgrading to {PlatformNameShort} 2.5. + +include::troubleshooting-aap/proc-troubleshoot-upgrade-issues.adoc[leveloffset=+1] diff --git a/downstream/attributes/attributes.adoc b/downstream/attributes/attributes.adoc index f0391dba2f..f79d18dde5 100644 --- a/downstream/attributes/attributes.adoc +++ b/downstream/attributes/attributes.adoc @@ -7,15 +7,19 @@ :CentralAuthStart: Central authentication :CentralAuth: central authentication :PlatformVers: 2.5 +:PostgresVers: PostgreSQL 15 //The Ansible-core version required to install AAP -:CoreInstVers: 2.14 +:CoreInstVers: 2.16 //The Ansible-core version used by the AAP control plane and EEs -:CoreUseVers: 2.15 -:PlatformDownloadUrl: https://access.redhat.com/downloads/content/480/ver=2.5/rhel---9/2.4/x86_64/product-software +:CoreUseVers: 2.16 +:PlatformDownloadUrl: https://access.redhat.com/downloads/content/480/ver=2.5/rhel---9/2.5/x86_64/product-software :BaseURL: https://docs.redhat.com/en/documentation :VMBase: VM-based installation :OperatorBase: operator-based installation :ContainerBase: container-based installation +:PlatformDashboard: platform dashboard +:Gateway: platform gateway +:GatewayStart: Platform gateway // Event-Driven Ansible :EDAName: Event-Driven Ansible @@ -33,8 +37,12 @@ :AAPonAzureName: Red Hat Ansible Automation Platform on Microsoft Azure :AAPonAzureNameShort: Ansible Automation Platform on Microsoft Azure :AWS: Amazon Web Services -:GCP: Google Cloud Platform :Azure: Microsoft Azure +:MSEntraID: Microsoft Entra ID +:SaaSonAWS: Red Hat Ansible Automation Platform Service on AWS +:SaaSonAWSShort: Ansible Automation Platform Service on AWS +// AAP on GCP has been deprecated +:GCP: Google Cloud Platform // Automation Mesh :AutomationMesh: automation mesh @@ -43,7 +51,8 @@ :RunnerRpm: Ansible-runner rpm/container // Operators -:OperatorPlatform: Ansible Automation Platform Operator +:OperatorPlatformName: Red Hat Ansible Automation Platform Operator +:OperatorPlatformNameShort: Ansible Automation Platform Operator :OperatorHub: Ansible Automation Platform Hub Operator :OperatorController: Ansible Automation Platform Controller Operator :OperatorResource: Ansible Automation Platform Resource Operator @@ -90,11 +99,12 @@ :MeshConnect: automation mesh connector :MeshReceptor: automation mesh receptor :ControllerGS: Getting started with automation controller -:ControllerUG: Automation controller User Guide -:ControllerAG: Automation controller Administration Guide +:ControllerUG: Using automation execution +:ControllerAG: Configuring automation execution :Analytics: Automation Analytics - +// Red Hat Edge Manager +:RedHatEdge: Red Hat Edge Manager // Execution environments :ExecEnvNameStart: Automation execution environments @@ -108,10 +118,13 @@ :Runner: Ansible Runner :Role: Role ARG Spec -// Ansible developer tools -:ToolsName: Ansible developer tools +// Ansible development tools
+:ToolsName: Ansible development tools :AAPRHDH: Ansible plug-ins for Red Hat Developer Hub +:AAPRHDHShort: Ansible plug-ins :RHDH: Red Hat Developer Hub +:RHDHVers: 1.3 +:RHDHShort: RHDH :Builder: Ansible Builder :Navigator: automation content navigator :NavigatorStart: Automation content navigator @@ -130,7 +143,7 @@ :Console: console.redhat.com // Satellite attributes -:SatelliteVers: 6.15 +:SatelliteVers: 6.16 // OpenShift attributes :OCP: Red Hat OpenShift Container Platform @@ -207,30 +220,30 @@ // FYI Automation Execution and Automation Decisions Projects will be under 1 selection in the 2.5-next or later. :MenuADProjects: menu:{MenuAD}[Projects] :MenuADDecisionEnvironments: menu:{MenuAD}[Decision Environments] -:MenuADWebhooks: menu:{MenuAD}[Webhooks] +:MenuADEventStreams: menu:{MenuAD}[Event Streams] :MenuADCredentials: menu:{MenuAD}[Infrastructure > Credentials] :MenuADCredentialType: menu:{MenuAD}[Infrastructure > Credential Types] -:MenuAECredentials: menu:{MenuTopAE}[Infrastructure > Credentials] -:MenuAECredentialType: menu:{MenuTopAE}[Infrastructure > Credential Types] + + // Automation Content (aka automation hub menu selections) // In 2.5EA the Automation Content selection will open a hub ui instance in a new tab/browser so the menu definitions will not change until 2.5-next -:MenuACNamespaces: menu:Collections[Namespaces] -:MenuACCollections: menu:Collections[Collections] -:MenuACExecEnvironments: menu:Execution Environments[Execution Environments] +:MenuACNamespaces: menu:{MenuTopAC}[Namespaces] +:MenuACCollections: menu:{MenuTopAC}[Collections] +:MenuACExecEnvironments: menu:{MenuTopAC}[Execution Environments] // Automation Content > Administration -:MenuACAdminSignatureKeys: menu:Signature Keys[] -:MenuACAdminRepositories: menu:Collections[Repositories] -:MenuACAdminRemoteRegistries: menu:Execution Environments[Remote Registries] -:MenuACAdminTasks: menu:Task Management[] -:MenuACAdminCollectionApproval: menu:Collections[Approval] -:MenuACAdminRemotes: menu:Collections[Remotes] -:MenuACAPIToken: menu:Collections[API token] +:MenuACAdminSignatureKeys: menu:{MenuTopAC}[Signature Keys] +:MenuACAdminRepositories: menu:{MenuTopAC}[Repositories] +:MenuACAdminRemoteRegistries: menu:{MenuTopAC}[Remote Registries] +:MenuACAdminTasks: menu:{MenuTopAC}[Task Management] +:MenuACAdminCollectionApproval: menu:{MenuTopAC}[Collection Approvals] +:MenuACAdminRemotes: menu:{MenuTopAC}[Remotes] +:MenuACAPIToken: menu:{MenuTopAC}[API token] //Each of the services previously had selections for access which will be centralized, ultimately these should be changed to use the attributes in Access Management menu selections once automation hub is provide in the full ui platform experience in 2.5-next -:MenuHubUsers: menu:User Access[Users] +:MenuHubUsers: menu:{MenuAM}[Users] :MenuHubGroups: menu:User Access[Groups] -:MenuHubRoles: menu:User Access[Roles] +:MenuHubRoles: menu:{MenuAM}[Roles] // Automation Analytics menu selections - According to mockups, analytics will be included in the Gateway nav only includes Automation Calculator, Host Metrics and Subscription Usage, other settings are also included on the Ansible dashboard on the Hybrid Cloud Console https://console.redhat.com/ansible/ansible-dashboard :MenuAAReports: menu:{MenuAA}[Reports] @@ -274,3 +287,170 @@ // Not yet implemented but look to be in the future scope 2.5-next plan //:MenuSetLogin: {MenuAEAdminSettings}[Log In Settings] //:MenuSetUI: {MenuAEAdminSettings}[User Interface Settings] + +// Title and link attributes +// +// 
titles/troubleshooting-aap +:TitleTroubleshootingAAP: Troubleshooting Ansible Automation Platform +:URLTroubleshootingAAP: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/troubleshooting_ansible_automation_platform +:LinkTroubleshootingAAP: {URLTroubleshootingAAP}[{TitleTroubleshootingAAP}] +// +// titles/aap-plugin-rhdh-install +:TitlePluginRHDHInstall: Installing Ansible plug-ins for Red Hat Developer Hub +:URLPluginRHDHInstall: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/installing_ansible_plug-ins_for_red_hat_developer_hub +:LinkPluginRHDHInstall: {URLPluginRHDHInstall}[{TitlePluginRHDHInstall}] +// +// titles/aap-plugin-rhdh-using +:TitlePluginRHDHUsing: Using Ansible plug-ins for Red Hat Developer Hub +:URLPluginRHDHUsing: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/using_ansible_plug-ins_for_red_hat_developer_hub +:LinkPluginRHDHUsing: {URLPluginRHDHUsing}[{TitlePluginRHDHUsing}] +// +// titles/aap-operations-guide +:TitleAAPOperationsGuide: Operating Ansible Automation Platform +:URLAAPOperationsGuide: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/operating_ansible_automation_platform +:LinkAAPOperationsGuide: {URLAAPOperationsGuide}[{TitleAAPOperationsGuide}] +// +// titles/eda/eda-user-guide +:TitleEDAUserGuide: Using automation decisions +:URLEDAUserGuide: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/using_automation_decisions +:LinkEDAUserGuide: {URLEDAUserGuide}[{TitleEDAUserGuide}] +// +// titles/upgrade +:TitleUpgrade: RPM upgrade and migration +:URLUpgrade: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/rpm_upgrade_and_migration +:LinkUpgrade: {URLUpgrade}[{TitleUpgrade}] +// +// titles/aap-operator-installation +:TitleOperatorInstallation: Installing on OpenShift Container Platform +:URLOperatorInstallation: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/installing_on_openshift_container_platform +:LinkOperatorInstallation: {URLOperatorInstallation}[{TitleOperatorInstallation}] +// +// titles/aap-installation-guide +:TitleInstallationGuide: RPM installation +:URLInstallationGuide: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/rpm_installation +:LinkInstallationGuide: {URLInstallationGuide}[{TitleInstallationGuide}] +// +// titles/aap-planning-guide +:TitlePlanningGuide: Planning your installation +:URLPlanningGuide: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/planning_your_installation +:LinkPlanningGuide: {URLPlanningGuide}[{TitlePlanningGuide}] +// +// titles/operator-mesh +:TitleOperatorMesh: Automation mesh for managed cloud or operator environments +:URLOperatorMesh: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_mesh_for_managed_cloud_or_operator_environments +:LinkOperatorMesh: {URLOperatorMesh}[{TitleOperatorMesh}] +// +// titles/automation-mesh +:TitleAutomationMesh: Automation mesh for VM environments +:URLAutomationMesh: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_mesh_for_vm_environments +:LinkAutomationMesh: {URLAutomationMesh}[{TitleAutomationMesh}] +// +// titles/ocp_performance_guide +:TitleOCPPerformanceGuide: Performance considerations for operator environments +:URLOCPPerformanceGuide: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/performance_considerations_for_operator_environments +:LinkOCPPerformanceGuide: {URLOCPPerformanceGuide}[{TitleOCPPerformanceGuide}] +// +// titles/security-guide 
+:TitleSecurityGuide: Implementing security automation +:URLSecurityGuide: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/implementing_security_automation +:LinkSecurityGuide: {URLSecurityGuide}[{TitleSecurityGuide}] +// +// titles/playbooks/playbooks-getting-started +:TitlePlaybooksGettingStarted: Getting started with playbooks +:URLPlaybooksGettingStarted: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_playbooks +:LinkPlaybooksGettingStarted: {URLPlaybooksGettingStarted}[{TitlePlaybooksGettingStarted}] +// +// titles/playbooks/playbooks-reference +:TitlePlaybooksReference: Reference guide to Ansible Playbooks +:URLPlaybooksReference: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/reference_guide_to_ansible_playbooks +:LinkPlaybooksReference: {URLPlaybooksReference}[{TitlePlaybooksReference}] +// +// titles/release-notes +:TitleReleaseNotes: Release notes +:URLReleaseNotes: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/release_notes +:LinkReleaseNotes: {URLReleaseNotes}[{TitleReleaseNotes}] +// +// titles/controller/controller-user-guide +:TitleControllerUserGuide: Using automation execution +:URLControllerUserGuide: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/using_automation_execution +:LinkControllerUserGuide: {URLControllerUserGuide}[{TitleControllerUserGuide}] +// +// titles/controller/controller-admin-guide +:TitleControllerAdminGuide: Configuring automation execution +:URLControllerAdminGuide: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution +:LinkControllerAdminGuide: {URLControllerAdminGuide}[{TitleControllerAdminGuide}] +// +// titles/controller/controller-api-overview +:TitleControllerAPIOverview: Automation execution API overview +:URLControllerAPIOverview: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_execution_api_overview +:LinkControllerAPIOverview: {URLControllerAPIOverview}[{TitleControllerAPIOverview}] +// +// titles/aap-operator-backup +:TitleOperatorBackup: Backup and recovery for operator environments +:URLOperatorBackup: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/backup_and_recovery_for_operator_environments +:LinkOperatorBackup: {URLOperatorBackup}[{TitleOperatorBackup}] +// +// titles/central-auth +:TitleCentralAuth: Access management and authentication +:URLCentralAuth: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/access_management_and_authentication +:LinkCentralAuth: {URLCentralAuth}[{TitleCentralAuth}] +// +// titles/getting-started +:TitleGettingStarted: Getting started with Ansible Automation Platform +:URLGettingStarted: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_ansible_automation_platform +:LinkGettingStarted: {URLGettingStarted}[{TitleGettingStarted}] +// +// titles/aap-containerized-install +:TitleContainerizedInstall: Containerized installation +:URLContainerizedInstall: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/containerized_installation +:LinkContainerizedInstall: {URLContainerizedInstall}[{TitleContainerizedInstall}] +// +// titles/navigator-guide +:TitleNavigatorGuide: Using content navigator +:URLNavigatorGuide: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/using_content_navigator +:LinkNavigatorGuide: {URLNavigatorGuide}[{TitleNavigatorGuide}] +// +// titles/aap-hardening +:TitleHardening: Hardening and compliance 
+:URLHardening: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/hardening_and_compliance +:LinkHardening: {URLHardening}[{TitleHardening}] +// +// titles/builder +:TitleBuilder: Creating and using execution environments +:URLBuilder: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/creating_and_using_execution_environments +:LinkBuilder: {URLBuilder}[{TitleBuilder}] +// +// titles/hub/managing-content +:TitleHubManagingContent: Managing automation content +:URLHubManagingContent: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/managing_automation_content +:LinkHubManagingContent: {URLHubManagingContent}[{TitleHubManagingContent}] +// +// titles/analytics +:TitleAnalytics: Using automation analytics +:URLAnalytics: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/using_automation_analytics +:LinkAnalytics: {URLAnalytics}[{TitleAnalytics}] +// +// titles/develop-automation-content +:TitleDevelopAutomationContent: Developing automation content +:URLDevelopAutomationContent: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/developing_automation_content +:LinkDevelopAutomationContent: {URLDevelopAutomationContent}[{TitleDevelopAutomationContent}] +// +// titles/topologies +:TitleTopologies: Tested deployment models +:URLTopologies: {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/tested_deployment_models +:LinkTopologies: {URLTopologies}[{TitleTopologies}] +// +// Lightspeed branch titles/lightspeed-user-guide +:TitleLightspeedUserGuide: Red Hat Ansible Lightspeed with IBM watsonx Code Assistant User Guide +:URLLightspeedUserGuide: {BaseURL}/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant/2.x_latest/html/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant_user_guide +:LinkLightspeedUserGuide: {URLLightspeedUserGuide}[{TitleLightspeedUserGuide}] +// +// Clouds branch titles/aap-on-azure +:TitleAzureGuide: Red Hat Ansible Automation Platform on Microsoft Azure Guide +:URLAzureGuide: {BaseURL}/ansible_on_clouds/2.x_latest/html/red_hat_ansible_automation_platform_on_microsoft_azure_guide +:LinkAzureGuide: {URLAzureGuide}[{TitleAzureGuide}] +// +// Clouds branch titles/saas-aws +:TitleSaaSAWSGuide: Red Hat Ansible Automation Platform Service on AWS +:URLSaaSAWSGuide: {BaseURL}/ansible_on_clouds/2.x_latest/html/red_hat_ansible_automation_platform_service_on_aws +:LinkSaaSAWSGuide: {URLSaaSAWSGuide}[{TitleSaaSAWSGuide}] diff --git a/downstream/images/AAP_dashboard_2.5.png b/downstream/images/AAP_dashboard_2.5.png new file mode 100644 index 0000000000..b9f17caa35 Binary files /dev/null and b/downstream/images/AAP_dashboard_2.5.png differ diff --git a/downstream/images/Subscription_tab.png b/downstream/images/Subscription_tab.png new file mode 100644 index 0000000000..808333ea97 Binary files /dev/null and b/downstream/images/Subscription_tab.png differ diff --git a/downstream/images/aap-network-ports-protocols.png b/downstream/images/aap-network-ports-protocols.png index 7e558fa42e..e3252b102a 100644 Binary files a/downstream/images/aap-network-ports-protocols.png and b/downstream/images/aap-network-ports-protocols.png differ diff --git a/downstream/images/account-linking-flow.png b/downstream/images/account-linking-flow.png new file mode 100644 index 0000000000..7a865445f0 Binary files /dev/null and b/downstream/images/account-linking-flow.png differ diff --git a/downstream/images/activity_stream_details.png b/downstream/images/activity_stream_details.png new file mode 100644 index 
0000000000..75ebde3fe9 Binary files /dev/null and b/downstream/images/activity_stream_details.png differ diff --git a/downstream/images/activity_stream_page.png b/downstream/images/activity_stream_page.png new file mode 100644 index 0000000000..18ee045d90 Binary files /dev/null and b/downstream/images/activity_stream_page.png differ diff --git a/downstream/images/ansible-network-ports-protocols.png b/downstream/images/ansible-network-ports-protocols.png deleted file mode 100644 index 39f7c612aa..0000000000 Binary files a/downstream/images/ansible-network-ports-protocols.png and /dev/null differ diff --git a/downstream/images/automation_analytics.png b/downstream/images/automation_analytics.png new file mode 100644 index 0000000000..c60744f773 Binary files /dev/null and b/downstream/images/automation_analytics.png differ diff --git a/downstream/images/change_subscription.png b/downstream/images/change_subscription.png new file mode 100644 index 0000000000..83637502ad Binary files /dev/null and b/downstream/images/change_subscription.png differ diff --git a/downstream/images/cont-a-env-a.png b/downstream/images/cont-a-env-a.png new file mode 100644 index 0000000000..03be405778 Binary files /dev/null and b/downstream/images/cont-a-env-a.png differ diff --git a/downstream/images/cont-b-env-a.png b/downstream/images/cont-b-env-a.png new file mode 100644 index 0000000000..62339d1cec Binary files /dev/null and b/downstream/images/cont-b-env-a.png differ diff --git a/downstream/images/credential-types-drop-down-menu.png b/downstream/images/credential-types-drop-down-menu.png index 9ed1f34abd..b54f1a59f9 100644 Binary files a/downstream/images/credential-types-drop-down-menu.png and b/downstream/images/credential-types-drop-down-menu.png differ diff --git a/downstream/images/devtools-extension-navigator-output.png b/downstream/images/devtools-extension-navigator-output.png new file mode 100644 index 0000000000..789fc805fe Binary files /dev/null and b/downstream/images/devtools-extension-navigator-output.png differ diff --git a/downstream/images/devtools-extension-navigator-tasks.png b/downstream/images/devtools-extension-navigator-tasks.png new file mode 100644 index 0000000000..aa6f5bd5ba Binary files /dev/null and b/downstream/images/devtools-extension-navigator-tasks.png differ diff --git a/downstream/images/devtools-reopen-in-container.png b/downstream/images/devtools-reopen-in-container.png new file mode 100644 index 0000000000..3047f23bdf Binary files /dev/null and b/downstream/images/devtools-reopen-in-container.png differ diff --git a/downstream/images/eda-event-details.png b/downstream/images/eda-event-details.png index b107ef0780..2ebb7df88f 100644 Binary files a/downstream/images/eda-event-details.png and b/downstream/images/eda-event-details.png differ diff --git a/downstream/images/eda-event-streams-mapping-UI.png b/downstream/images/eda-event-streams-mapping-UI.png new file mode 100644 index 0000000000..16e2f972f5 Binary files /dev/null and b/downstream/images/eda-event-streams-mapping-UI.png differ diff --git a/downstream/images/eda-event-streams-swapping-sources.png b/downstream/images/eda-event-streams-swapping-sources.png new file mode 100644 index 0000000000..d83d34189f Binary files /dev/null and b/downstream/images/eda-event-streams-swapping-sources.png differ diff --git a/downstream/images/eda-forwarding-event-to-activation-toggle.png b/downstream/images/eda-forwarding-event-to-activation-toggle.png new file mode 100644 index 0000000000..3e46f739c1 Binary files /dev/null and 
b/downstream/images/eda-forwarding-event-to-activation-toggle.png differ diff --git a/downstream/images/eda-latest-event-streams-mapping.png b/downstream/images/eda-latest-event-streams-mapping.png new file mode 100644 index 0000000000..ad9250ed00 Binary files /dev/null and b/downstream/images/eda-latest-event-streams-mapping.png differ diff --git a/downstream/images/eda-payload-body-event-streams.png b/downstream/images/eda-payload-body-event-streams.png new file mode 100644 index 0000000000..d41506290e Binary files /dev/null and b/downstream/images/eda-payload-body-event-streams.png differ diff --git a/downstream/images/eda-rule-audit-event-streams.png b/downstream/images/eda-rule-audit-event-streams.png new file mode 100644 index 0000000000..d4c2a456d2 Binary files /dev/null and b/downstream/images/eda-rule-audit-event-streams.png differ diff --git a/downstream/images/eda-rule-audit-list-view.png b/downstream/images/eda-rule-audit-list-view.png index ea8b62bcdd..ccaedfe0d7 100644 Binary files a/downstream/images/eda-rule-audit-list-view.png and b/downstream/images/eda-rule-audit-list-view.png differ diff --git a/downstream/images/eda-verify-event-streams.png b/downstream/images/eda-verify-event-streams.png new file mode 100644 index 0000000000..1014005611 Binary files /dev/null and b/downstream/images/eda-verify-event-streams.png differ diff --git a/downstream/images/eda-verify-rulebook-attachment.png b/downstream/images/eda-verify-rulebook-attachment.png new file mode 100644 index 0000000000..d136609011 Binary files /dev/null and b/downstream/images/eda-verify-rulebook-attachment.png differ diff --git a/downstream/images/gw-clustered-redis.png b/downstream/images/gw-clustered-redis.png new file mode 100644 index 0000000000..743d8c05dc Binary files /dev/null and b/downstream/images/gw-clustered-redis.png differ diff --git a/downstream/images/gw-single-node-redis.png b/downstream/images/gw-single-node-redis.png new file mode 100644 index 0000000000..a02e42d9d2 Binary files /dev/null and b/downstream/images/gw-single-node-redis.png differ diff --git a/downstream/images/hosts_jobs_details.png b/downstream/images/hosts_jobs_details.png new file mode 100644 index 0000000000..757452dba1 Binary files /dev/null and b/downstream/images/hosts_jobs_details.png differ diff --git a/downstream/images/job-settings-full.png b/downstream/images/job-settings-full.png new file mode 100644 index 0000000000..ae00c4c174 Binary files /dev/null and b/downstream/images/job-settings-full.png differ diff --git a/downstream/images/logging-settings.png b/downstream/images/logging-settings.png new file mode 100644 index 0000000000..f9137a70c3 Binary files /dev/null and b/downstream/images/logging-settings.png differ diff --git a/downstream/images/logging-splunk-controller-example.png b/downstream/images/logging-splunk-controller-example.png new file mode 100644 index 0000000000..a568f61516 Binary files /dev/null and b/downstream/images/logging-splunk-controller-example.png differ diff --git a/downstream/images/ocp-a-env-a.png b/downstream/images/ocp-a-env-a.png new file mode 100644 index 0000000000..03fa781c3b Binary files /dev/null and b/downstream/images/ocp-a-env-a.png differ diff --git a/downstream/images/ocp-b-env-a.png b/downstream/images/ocp-b-env-a.png new file mode 100644 index 0000000000..22b6f99a95 Binary files /dev/null and b/downstream/images/ocp-b-env-a.png differ diff --git a/downstream/images/platform_gateway_full.png b/downstream/images/platform_gateway_full.png new file mode 100644 index 
0000000000..99882b4d69 Binary files /dev/null and b/downstream/images/platform_gateway_full.png differ diff --git a/downstream/images/platform_gateway_settings_page.png b/downstream/images/platform_gateway_settings_page.png new file mode 100644 index 0000000000..f58f14cd5a Binary files /dev/null and b/downstream/images/platform_gateway_settings_page.png differ diff --git a/downstream/images/rhdh-ansible-plugin-architecture.png b/downstream/images/rhdh-ansible-plugin-architecture.png new file mode 100644 index 0000000000..5178397f0a Binary files /dev/null and b/downstream/images/rhdh-ansible-plugin-architecture.png differ diff --git a/downstream/images/rhdh-check-devtools-container.png b/downstream/images/rhdh-check-devtools-container.png new file mode 100644 index 0000000000..8fbaf48e36 Binary files /dev/null and b/downstream/images/rhdh-check-devtools-container.png differ diff --git a/downstream/images/rhdh-check-plugin-config.png b/downstream/images/rhdh-check-plugin-config.png new file mode 100644 index 0000000000..fde6bb35b4 Binary files /dev/null and b/downstream/images/rhdh-check-plugin-config.png differ diff --git a/downstream/images/rhdh-feedback-form.png b/downstream/images/rhdh-feedback-form.png new file mode 100644 index 0000000000..27c4ea9013 Binary files /dev/null and b/downstream/images/rhdh-feedback-form.png differ diff --git a/downstream/images/rhdh-plugin-dashboard.png b/downstream/images/rhdh-plugin-dashboard.png new file mode 100644 index 0000000000..2a0ca54da9 Binary files /dev/null and b/downstream/images/rhdh-plugin-dashboard.png differ diff --git a/downstream/images/rhdh-plugin-registry.png b/downstream/images/rhdh-plugin-registry.png new file mode 100644 index 0000000000..f7b0e1a0e1 Binary files /dev/null and b/downstream/images/rhdh-plugin-registry.png differ diff --git a/downstream/images/rhdh-vscode-run-playbook.png b/downstream/images/rhdh-vscode-run-playbook.png new file mode 100644 index 0000000000..4521589f43 Binary files /dev/null and b/downstream/images/rhdh-vscode-run-playbook.png differ diff --git a/downstream/images/rpm-a-env-a.png b/downstream/images/rpm-a-env-a.png new file mode 100644 index 0000000000..19ccb5d084 Binary files /dev/null and b/downstream/images/rpm-a-env-a.png differ diff --git a/downstream/images/rpm-a-env-b.png b/downstream/images/rpm-a-env-b.png new file mode 100644 index 0000000000..2e79b7fad6 Binary files /dev/null and b/downstream/images/rpm-a-env-b.png differ diff --git a/downstream/images/rpm-b-env-a.png b/downstream/images/rpm-b-env-a.png new file mode 100644 index 0000000000..ae29efe9b2 Binary files /dev/null and b/downstream/images/rpm-b-env-a.png differ diff --git a/downstream/images/rpm-b-env-b.png b/downstream/images/rpm-b-env-b.png new file mode 100644 index 0000000000..24e01a3ce2 Binary files /dev/null and b/downstream/images/rpm-b-env-b.png differ diff --git a/downstream/images/settings_subscription_page.png b/downstream/images/settings_subscription_page.png new file mode 100644 index 0000000000..d6287bea5e Binary files /dev/null and b/downstream/images/settings_subscription_page.png differ diff --git a/downstream/images/sort-order-example.png b/downstream/images/sort-order-example.png index 4d1ac14901..5392b9dcb7 100644 Binary files a/downstream/images/sort-order-example.png and b/downstream/images/sort-order-example.png differ diff --git a/downstream/images/subscriptions_first-page.png b/downstream/images/subscriptions_first-page.png new file mode 100644 index 0000000000..8a704b779e Binary files /dev/null and 
b/downstream/images/subscriptions_first-page.png differ
diff --git a/downstream/images/svg/OCP-A_Env-A-R.svg b/downstream/images/svg/OCP-A_Env-A-R.svg
new file mode 100644
index 0000000000..43e202c963
--- /dev/null
+++ b/downstream/images/svg/OCP-A_Env-A-R.svg
@@ -0,0 +1,661 @@
[661 lines of SVG markup omitted: source diagram for the Operator growth topology. Labels show platform gateway, automation controller, automation hub, and Event-Driven Ansible deployments, each with ingress, service, and pod entries (including mesh ingress, event stream, scheduler, and worker pods); in-cluster Postgres and Redis pods with PVCs; the Ansible Automation Platform operator; and port annotations for 80/443 http(s) ingress, PostgreSQL 5432, and Redis 6379.]
diff --git a/downstream/images/svg/OCP-B_Env-A-R.svg b/downstream/images/svg/OCP-B_Env-A-R.svg
new file mode 100644
index 0000000000..8a58e98de2
--- /dev/null
+++ b/downstream/images/svg/OCP-B_Env-A-R.svg
@@ -0,0 +1,660 @@
[660 lines of SVG markup omitted: source diagram for the Operator enterprise topology. Labels show the same deployments as the growth topology plus a second automation hub worker pod, with external Postgres and external Redis, and port annotations for 80/443 http(s) ingress, PostgreSQL 5432, and Redis 6379.]
diff --git a/downstream/images/svg/st_CONT_B_Env_A-R.svg b/downstream/images/svg/st_CONT_B_Env_A-R.svg
new file mode 100644
index 0000000000..9d20cd8681
--- /dev/null
+++ b/downstream/images/svg/st_CONT_B_Env_A-R.svg
@@ -0,0 +1,1070 @@
[1070 lines of SVG markup omitted, truncated in this excerpt: source diagram for the Container enterprise topology. Labels show an HA proxy/load balancer in front of platform gateway, automation controller, automation hub, and Event-Driven Ansible containers (each with Redis), automation mesh hop and execution nodes, and external Postgres, annotated with ports 80/443 http(s) ingress, PostgreSQL 5432, Redis 6379/16379, receptor 27199, and 50051 gRPC.]
+ + + Port 5432 + + diff --git a/downstream/images/svg/st_Cont-A_Env-A-R.svg b/downstream/images/svg/st_Cont-A_Env-A-R.svg new file mode 100644 index 0000000000..b65483f408 --- /dev/null +++ b/downstream/images/svg/st_Cont-A_Env-A-R.svg @@ -0,0 +1,404 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Page-1 + + + Sheet.1 + Example proxy/load balancer + + + + Example proxy/load balancer + + Sheet.2 + Platform gateway containers + + + + Platformgateway containers + + Sheet.3 + Event Driven Ansible containers + + + + Event Driven Ansible containers + + Sheet.4 + Automation controller containers + + + + Automation controller containers + + Sheet.5 + Automation hub containers + + + + Automation hub containers + + Sheet.6 + Postgres container + + + + Postgres container + + Sheet.7 + Redis container + + + + Redis container + + Sheet.8 + Automation mesh container + + + + Automation mesh container + + Sheet.9 + Execution container + + + + Execution container + + Sheet.10 + + + + Sheet.11 + + + + Sheet.12 + + + + Sheet.13 + + + + Sheet.14 + + + + Sheet.15 + + + + Sheet.19 + + + + Sheet.16 + + + + Sheet.17 + + + + Sheet.18 + + + + Sheet.20 + + + + Sheet.21 + + + + Sheet.24 + + + + Sheet.25 + + + + Sheet.27 + Port 5432 + + + + Port 5432 + + Sheet.28 + Port 27199 + + + + Port 27199 + + Sheet.29 + Port 6379 + + + + Port 6379 + + Sheet.30 + Port 80/443 + + + + Port 80/443 + + Sheet.31 + Port 80/443 + + + + Port 80/443 + + Sheet.32 + + + + Sheet.33 + Port 80/443 + + + + Port 80/443 + + Sheet.34 + + + + Sheet.35 + Port 80/443 + + + + Port 80/443 + + Sheet.36 + + + + Sheet.37 + + + + Sheet.38 + + + + Sheet.39 + + + + Sheet.40 + + + + Sheet.41 + 80/443 http(s) ingress + + + + 80/443 http(s) ingress + + Sheet.42 + PostgreSQL: 5432-database + + + + PostgreSQL: 5432-database + + Sheet.43 + Redis: 6379 – job control/caching + + + + Redis: 6379 – job control/caching + + Sheet.44 + Receptor:27199 – work/job execution + + + + Receptor:27199 – work/job execution + + Sheet.45 + + + + Sheet.46 + Ansible automation platform + + + + Ansible automation platform + + Sheet.47 + + + + Sheet.48 + + + + Sheet.49 + + + + Sheet.50 + + + + Sheet.51 + + + + Sheet.54 + + + + Sheet.56 + + + + Sheet.57 + + + + Sheet.58 + + + + Sheet.23 + + + + Sheet.26 + + + + Sheet.22 + + + + Sheet.59 + + + + Sheet.53 + + + + Sheet.60 + + + + Sheet.52 + + + + Sheet.55 + Port 80/443 + + + + Port 80/443 + + diff --git a/downstream/images/svg/st_Network-R.svg b/downstream/images/svg/st_Network-R.svg new file mode 100644 index 0000000000..7b1d36eb84 --- /dev/null +++ b/downstream/images/svg/st_Network-R.svg @@ -0,0 +1,1020 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Page-1 + + + Sheet.26 + + + + Sheet.27 + + + + Sheet.28 + Port 8443 + + + + Port 8443 + + Sheet.24 + + + + Sheet.25 + gateway.node + + + + gateway.node + + Sheet.33 + Platform gateway + + + + Platform gateway + + Sheet.22 + + + + Sheet.23 + gateway.node + + + + gateway.node + + Sheet.32 + Platform gatway + + + + Platform gatway + + Sheet.8 + + + + Sheet.9 + eda.node + + + + eda.node + + Sheet.36 + Automation EDA controller + + + + Automation EDAcontroller + + Sheet.4 + + + + Sheet.5 + SSH/WinRm/HTTP/etc + + + + SSH/WinRm/HTTP/etc + + Sheet.58 + Managed node + + + + Managed node + + Sheet.41 + + + + Sheet.47 + redis.node + + + + redis.node + + Sheet.56 + + + + Sheet.57 + redis.node + + + + redis.node + + Sheet.46 + + Sheet.12 + + + + Sheet.13 + hub.node + + + + hub.node + + + 
Sheet.51 + Automation hub + + + + Automation hub + + Sheet.18 + + + + Sheet.19 + database.node + + + + database.node + + Sheet.54 + PostgreSQL database + + + + PostgreSQL database + + Sheet.64 + + + + Sheet.65 + Port 16379 + + + + Port 16379 + + Sheet.66 + Redis cluster node + + + + Redis cluster node + + Sheet.67 + Redis + + + + Redis + + Sheet.73 + + + + Sheet.74 + + + + Sheet.80 + + + + Sheet.85 + + + + Sheet.87 + Port 5432 + + + + Port 5432 + + Sheet.93 + + + + Sheet.94 + + + + Sheet.96 + + + + Sheet.97 + + + + Sheet.68 + + + + Sheet.69 + + + + Sheet.71 + Ingress + + + + Ingress + + Sheet.70 + ingress.node + + + + ingress.node + + Sheet.109 + + + + Sheet.112 + Port 6379 + + + + Port 6379 + + Sheet.79 + + + + Sheet.89 + Port 5432 + + + + Port 5432 + + Sheet.119 + + + + Sheet.121 + Port 5432 + + + + Port 5432 + + Sheet.120 + + + + Sheet.49 + + + + Sheet.124 + + + + Sheet.128 + + + + Sheet.130 + + + + Sheet.131 + + + + Sheet.132 + + + + Sheet.133 + + + + Sheet.134 + + + + Sheet.135 + 80/443 http(s) ingress + + + + 80/443 http(s) ingress + + Sheet.136 + PostgreSQL: 5432-database + + + + PostgreSQL: 5432-database + + Sheet.137 + Redis: (1)6379 – job control/caching + + + + Redis: (1)6379 – job control/caching + + Sheet.138 + Receptor:27199 – work/job execution + + + + Receptor:27199 – work/job execution + + Sheet.139 + External: Varied + + + + External: Varied + + Sheet.98 + Port 27199 + + + + Port 27199 + + Sheet.95 + Port 27199 + + + + Port 27199 + + Sheet.35 + Port 80/443 + + + + Port 80/443 + + Sheet.72 + Port 80/443 + + + + Port 80/443 + + Sheet.149 + + Sheet.125 + + + + Sheet.126 + NFs/S3/etc + + + + NFs/S3/etc + + Sheet.127 + External storage + + + + External storage + + + Sheet.151 + + Sheet.48 + + Sheet.6 + + + + Sheet.7 + hop.node + + + + hop.node + + + Sheet.55 + Hop node + + + + Hop node + + + Sheet.152 + + Sheet.90 + + + + Sheet.91 + activation.node + + + + activation.node + + Sheet.92 + EDA activation + + + + EDA activation + + + Sheet.153 + + Sheet.20 + + + + Sheet.21 + + + + Sheet.52 + External event system + + + + External event system + + + Sheet.154 + + Sheet.16 + + + + Sheet.17 + worker.node + + + + worker.node + + Sheet.34 + EDA webhook worker + + + + EDA webhook worker + + + Sheet.129 + + + + Sheet.156 + + + + Sheet.101 + + + + Sheet.155 + + Sheet.10 + + + + Sheet.11 + exec.node + + + + exec.node + + Sheet.50 + Execution node + + + + Execution node + + + Sheet.157 + + + + Sheet.77 + + + + Sheet.123 + Port 80/443 + + + + Port 80/443 + + Sheet.158 + + + + Sheet.114 + + + + Sheet.159 + + + + Sheet.160 + + + + Sheet.81 + + + + Sheet.78 + Port 80/443 + + + + Port 80/443 + + Sheet.141 + Port 5432 + + + + Port 5432 + + Sheet.29 + Port 50051 grpc + + + + Port 50051grpc + + Sheet.82 + Port 80/443 + + + + Port 80/443 + + Sheet.14 + + Sheet.1 + + + + Sheet.2 + controller.node + + + + controller.node + + Sheet.3 + Automation controller + + + + Automation controller + + + Sheet.15 + + + + Sheet.146 + + + + Sheet.147 + Port 80/443 + + + + Port 80/443 + + Sheet.30 + + + + Sheet.116 + + + + Sheet.118 + Port 80/443 + + + + Port 80/443 + + Sheet.31 + + + + Sheet.161 + + + + Sheet.140 + Client + + + + Client + + Sheet.162 + Port 80/443 + + + + Port 80/443 + + Sheet.83 + + + + Sheet.84 + + + + Sheet.103 + + + + Sheet.108 + + + + Sheet.113 + + + + Sheet.115 + + + + Sheet.102 + + + + Sheet.122 + Port80/443 + + + + Port80/443 + + Sheet.143 + + + + Sheet.150 + + + + Sheet.165 + + + + Sheet.144 + + + + Sheet.148 + + + + + + Sheet.145 + Port 80/443 + + + + Port 80/443 + + Sheet.166 + + + + 
Sheet.142 + + + + Sheet.167 + + + + Sheet.75 + + + + Sheet.168 + Port80/443 + + + + Port80/443 + + Sheet.170 + + + + Sheet.76 + + + + Sheet.169 + Port80/443 + + + + Port80/443 + + Sheet.171 + + + + Sheet.172 + + + + Sheet.99 + + + + Sheet.117 + + + + Sheet.105 + + + + Sheet.111 + Port 6379 + + + + Port 6379 + + Sheet.110 + Port 6379 + + + + Port 6379 + + Sheet.107 + + + + Sheet.104 + + + + Sheet.106 + + + + Sheet.86 + + + + Sheet.88 + Port 5432 + + + + Port 5432 + + Sheet.173 + + + + Sheet.174 + 8443 default gateway port + + + + 8443 default gateway port + + Sheet.100 + Port80/443 + + + + Port80/443 + + diff --git a/downstream/images/svg/st_Proxy.svg b/downstream/images/svg/st_Proxy.svg new file mode 100644 index 0000000000..509fefdfac --- /dev/null +++ b/downstream/images/svg/st_Proxy.svg @@ -0,0 +1,370 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Page-1 + + + Sheet.21 + + + + Sheet.4 + + + + Sheet.5 + Automation controller VM + + + + Automation controller VM + + Sheet.6 + Automation controller + + + + Automation controller + + Sheet.7 + + + + Sheet.8 + Event-Driven ansible VM + + + + Event-Driven ansible VM + + Sheet.9 + Event-Driven ansible + + + + Event-Driven ansible + + Sheet.10 + + + + Sheet.11 + Automation hub VM + + + + Automation hub VM + + Sheet.12 + Automation hub + + + + Automation hub + + Sheet.19 + + Sheet.1 + + + + Sheet.2 + Platform gateway VM + + + + Platform gateway VM + + Sheet.3 + Platform gateway + + + + Platform gateway + + + Sheet.20 + + Sheet.16 + + + + Sheet.17 + database.node + + + + database.node + + Sheet.18 + Database + + + + Database + + + Sheet.22 + + + + Sheet.23 + + + + Sheet.24 + + + + Sheet.25 + + + + Sheet.26 + + + + Sheet.27 + + + + Sheet.28 + + + + Sheet.30 + Internet gateway + + + + Internet gateway + + Sheet.31 + + + + Sheet.32 + Loadbalancer + + + + Loadbalancer + + Sheet.33 + Default security group + + + + Default security group + + Sheet.29 + Proxy (squid) + + + + Proxy (squid) + + Sheet.34 + + + + Sheet.35 + + + + Sheet.36 + All traffic allowed + + + + All traffic allowed + + Sheet.37 + HTTPS traffic through Port 3128 + + + + HTTPS traffic through Port 3128 + + Sheet.38 + + + + Sheet.39 + SSH traffic Port 22 + + + + SSH trafficPort 22 + + Sheet.40 + + + + Sheet.41 + Access API and UI (HTTPS) through public IPS + + + + Access API and UI (HTTPS) through public IPS + + Sheet.42 + + + + Sheet.43 + All outbound traffic + + + + All outbound traffic + + Sheet.44 + Restricted security group + + + + Restricted security group + + Sheet.45 + + + + Sheet.46 + Control plane VPC + + + + Control plane VPC + + Sheet.47 + Port 80/443 + + + + Port 80/443 + + Sheet.48 + Port 80/443 + + + + Port 80/443 + + Sheet.49 + Port 80/443 + + + + Port 80/443 + + Sheet.50 + + Sheet.51 + + + + Sheet.53 + Automation mesh + + + + Automation mesh + + + Sheet.52 + + + + Sheet.54 + Port 27199 + + + + Port 27199 + + Sheet.13 + Incoming only during install + + + + Incoming only during install + + diff --git a/downstream/images/svg/st_RPM_A_Env_A-R.svg b/downstream/images/svg/st_RPM_A_Env_A-R.svg new file mode 100644 index 0000000000..4453cfb456 --- /dev/null +++ b/downstream/images/svg/st_RPM_A_Env_A-R.svg @@ -0,0 +1,367 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Page-1 + + + Sheet.5 + + + + Sheet.10 + Automation mesh + + + + Automation mesh + + Sheet.48 + + + + Sheet.102 + + + + Sheet.103 + + + + Sheet.104 + + + + Sheet.105 + + + + Sheet.106 + 80/443 http(s) ingress + + + + 80/443 http(s) ingress + + Sheet.107 + 
PostgreSQL: 5432-database + + + + PostgreSQL: 5432-database + + Sheet.108 + Redis: 6379 – job control/caching + + + + Redis: 6379 – job control/caching + + Sheet.109 + Receptor:27199 – work/job execution + + + + Receptor:27199 – work/job execution + + Sheet.9 + Execution VM + + + + Execution VM + + Sheet.69 + + + + Sheet.117 + + + + Sheet.20 + Port 5432 + + + + Port 5432 + + Sheet.89 + Port 5432 + + + + Port 5432 + + Sheet.7 + Automation hub VM + + + + Automation hub VM + + Sheet.19 + Event Driven Ansible VM + + + + Event Driven Ansible VM + + Sheet.8 + Automation Controller VM + + + + Automation Controller VM + + Sheet.29 + + + + Sheet.38 + + + + Sheet.40 + + + + Sheet.132 + + + + Sheet.133 + + + + Sheet.31 + + + + Sheet.18 + Port 27199 + + + + Port 27199 + + Sheet.135 + Port 80/443 + + + + Port 80/443 + + Sheet.1 + + + + Sheet.28 + Port 80/443 + + + + Port 80/443 + + Sheet.46 + + + + Sheet.32 + + + + Sheet.25 + Port 80/443 + + + + Port 80/443 + + Sheet.15 + + + + Sheet.92 + Port 80/443 + + + + Port 80/443 + + Sheet.34 + Port 27199 + + + + Port 27199 + + Sheet.2 + + + + Sheet.12 + + + + Sheet.17 + + + + Sheet.21 + + + + Sheet.3 + + Sheet.6 + + + + Sheet.13 + Redis + + + + Redis + + Sheet.14 + Platform gateway VM + + + + Platform gateway VM + + + Sheet.16 + + Sheet.22 + + + + Sheet.23 + + + + Sheet.24 + + + + Sheet.26 + + + + Sheet.27 + + + + Sheet.30 + + + + + Sheet.33 + Postgres + + + + Postgres + + Sheet.49 + + + + Sheet.39 + Port 6379 + + + + Port 6379 + + diff --git a/downstream/images/svg/st_RPM_A_Env_B-R.svg b/downstream/images/svg/st_RPM_A_Env_B-R.svg new file mode 100644 index 0000000000..d0123b6892 --- /dev/null +++ b/downstream/images/svg/st_RPM_A_Env_B-R.svg @@ -0,0 +1,379 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Page-1 + + + Sheet.5 + + + + Sheet.10 + Automation mesh + + + + Automation mesh + + Sheet.48 + + + + Sheet.102 + + + + Sheet.103 + + + + Sheet.104 + + + + Sheet.105 + + + + Sheet.106 + 80/443 http(s) ingress + + + + 80/443 http(s) ingress + + Sheet.107 + PostgreSQL: 5432-database + + + + PostgreSQL: 5432-database + + Sheet.108 + Redis: 6379 – job control/caching + + + + Redis: 6379 – job control/caching + + Sheet.109 + Receptor:27199 – work/job execution + + + + Receptor:27199 – work/job execution + + Sheet.9 + Execution VM + + + + Execution VM + + Sheet.69 + + + + Sheet.117 + + + + Sheet.20 + Port 5432 + + + + Port 5432 + + Sheet.89 + Port 5432 + + + + Port 5432 + + Sheet.7 + Automation hub VM + + + + Automation hub VM + + Sheet.8 + Automation Controller VM + + + + Automation Controller VM + + Sheet.29 + + + + Sheet.38 + + + + Sheet.40 + + + + Sheet.132 + + + + Sheet.133 + + + + Sheet.31 + + + + Sheet.18 + Port 27199 + + + + Port 27199 + + Sheet.135 + Port 80/443 + + + + Port 80/443 + + Sheet.1 + + + + Sheet.28 + Port 80/443 + + + + Port 80/443 + + Sheet.46 + + + + Sheet.32 + + + + Sheet.25 + Port 80/443 + + + + Port 80/443 + + Sheet.15 + + + + Sheet.92 + Port 80/443 + + + + Port 80/443 + + Sheet.34 + Port 27199 + + + + Port 27199 + + Sheet.2 + + + + Sheet.12 + + + + Sheet.17 + + + + Sheet.21 + + + + Sheet.3 + + Sheet.6 + + + + Sheet.13 + Redis + + + + Redis + + Sheet.14 + Platform gateway VM + + + + Platform gateway VM + + + Sheet.16 + + Sheet.22 + + + + Sheet.23 + + + + Sheet.24 + + + + Sheet.26 + + + + Sheet.27 + + + + Sheet.30 + + + + + Sheet.33 + Postgres + + + + Postgres + + Sheet.19 + Event Driven Ansible VM + + + + Event Driven Ansible VM + + Sheet.136 + + + + Sheet.49 + + + + Sheet.39 + Port 6379 + + + + Port 6379 
+ + diff --git a/downstream/images/svg/st_RPM_B_Env_A-R.svg b/downstream/images/svg/st_RPM_B_Env_A-R.svg new file mode 100644 index 0000000000..726553ceb2 --- /dev/null +++ b/downstream/images/svg/st_RPM_B_Env_A-R.svg @@ -0,0 +1,1039 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Page-1 + + + Sheet.5 + + + + Sheet.10 + Automation mesh + + + + Automation mesh + + Sheet.52 + + + + Sheet.40 + + + + Sheet.48 + + + + Sheet.55 + + + + Sheet.81 + + + + Sheet.98 + + + + Sheet.102 + + + + Sheet.103 + + + + Sheet.104 + + + + Sheet.105 + + + + Sheet.106 + 80/443 http(s) ingress + + + + 80/443 http(s) ingress + + Sheet.107 + PostgreSQL: 5432-database + + + + PostgreSQL: 5432-database + + Sheet.108 + Redis: 6379 – job control/caching + + + + Redis: 6379 – job control/caching + + Sheet.109 + Receptor:27199 – work/job execution + + + + Receptor:27199 – work/job execution + + Sheet.45 + Port 6379 + + + + Port 6379 + + Sheet.113 + + + + Sheet.14 + + + + Sheet.34 + Port 27199 + + + + Port 27199 + + Sheet.2 + + + + Sheet.53 + + + + Sheet.57 + Port 6379 + + + + Port 6379 + + Sheet.54 + + + + Sheet.18 + Port 27199 + + + + Port 27199 + + Sheet.86 + + + + Sheet.9 + Hop node VM + + + + Hop node VM + + Sheet.134 + + + + Sheet.135 + 50051 grpc + + + + 50051 grpc + + Sheet.138 + + + + Sheet.139 + + + + Sheet.35 + + + + Sheet.36 + + + + Sheet.38 + Automation hub + + + + Automation hub + + Sheet.39 + Automation hub VM2 + + + + Automation hub VM2 + + Sheet.110 + redis + + + + redis + + Sheet.112 + Port 6379 + + + + Port 6379 + + Sheet.118 + + + + Sheet.119 + Automation controller + + + + Automation controller + + Sheet.120 + Automation controller VM2 + + + + Automation controller VM2 + + Sheet.149 + + + + Sheet.100 + + + + Sheet.121 + + + + Sheet.122 + + + + Sheet.148 + + + + Sheet.82 + + + + Sheet.150 + + + + Sheet.59 + + + + Sheet.123 + + + + Sheet.151 + Hop node + + + + Hop node + + Sheet.22 + Platform gateway + + + + Platform gateway + + Sheet.12 + + + + Sheet.17 + Event-Driven Ansible + + + + Event-Driven Ansible + + Sheet.25 + Event-Driven Ansible VM2 + + + + Event-Driven Ansible VM2 + + Sheet.29 + redis + + + + redis + + Sheet.116 + + + + Sheet.84 + + + + Sheet.136 + Automation hub + + + + Automation hub + + Sheet.140 + Automation hub VM1 + + + + Automation hub VM1 + + Sheet.141 + redis + + + + redis + + Sheet.144 + + + + Sheet.145 + Event-Driven Ansible + + + + Event-Driven Ansible + + Sheet.146 + Event-Driven Ansible VM1 + + + + Event-Driven Ansible VM1 + + Sheet.147 + redis + + + + redis + + Sheet.90 + + + + Sheet.94 + Automation controller + + + + Automation controller + + Sheet.115 + Automation controller VM1 + + + + Automation controller VM1 + + Sheet.164 + + + + Sheet.169 + + + + Sheet.170 + + + + Sheet.79 + + + + Sheet.80 + Port 80/443 + + + + Port 80/443 + + Sheet.88 + + + + Sheet.3 + + + + Sheet.78 + + + + Sheet.99 + Port 80/443 + + + + Port 80/443 + + Sheet.66 + Port 16379 + + + + Port 16379 + + Sheet.58 + + + + Sheet.49 + + + + Sheet.63 + + + + Sheet.60 + + + + Sheet.64 + + + + Sheet.1 + Port 6379 + + + + Port 6379 + + Sheet.173 + + + + Sheet.111 + + + + Sheet.142 + Port 16379 + + + + Port 16379 + + Sheet.171 + Port 6379 + + + + Port 6379 + + Sheet.37 + + Sheet.176 + + + + Sheet.177 + + + + Sheet.175 + + + + Sheet.178 + + + + Sheet.183 + + + + Sheet.186 + + + + + Sheet.21 + Port 27199 + + + + Port 27199 + + Sheet.43 + + Sheet.152 + + + + Sheet.154 + Execution VM + + + + Execution VM + + Sheet.156 + Execution node + + + + Execution node + + + 
Sheet.44 + + Sheet.56 + + + + Sheet.67 + Execution VM + + + + Execution VM + + Sheet.68 + Execution node + + + + Execution node + + + Sheet.187 + + + + Sheet.188 + + + + Sheet.189 + + + + Sheet.190 + + + + Sheet.191 + + + + Sheet.192 + + + + Sheet.193 + + + + Sheet.194 + + + + Sheet.65 + HA proxy/ load balancer + + + + HA proxy/ load balancer + + Sheet.195 + + + + Sheet.196 + + + + Sheet.197 + + + + Sheet.198 + + + + Sheet.199 + + + + Sheet.200 + + + + Sheet.51 + + + + Sheet.163 + + + + Sheet.61 + + + + Sheet.129 + Port 5432 + + + + Port 5432 + + Sheet.131 + Port 5432 + + + + Port 5432 + + Sheet.46 + + Sheet.19 + + + + Sheet.28 + Platform gateway VM2 + + + + Platform gateway VM2 + + Sheet.33 + redis + + + + redis + + + Sheet.128 + Port 5432 + + + + Port 5432 + + Sheet.16 + Postgres (External) + + + + Postgres (External) + + Sheet.31 + + + + Sheet.27 + Port 27199 + + + + Port 27199 + + Sheet.47 + + + + Sheet.70 + + + + Sheet.71 + + + + Sheet.73 + + + + Sheet.74 + + + + Sheet.69 + + + + Sheet.72 + + + + Sheet.165 + + Sheet.6 + + + + Sheet.15 + Platform gateway + + + + Platform gateway + + Sheet.23 + Platform gateway VM1 + + + + Platform gateway VM1 + + Sheet.30 + redis + + + + redis + + + Sheet.89 + Port 5432 + + + + Port 5432 + + Sheet.95 + Port 80/443 + + + + Port 80/443 + + Sheet.92 + Port 80/443 + + + + Port 80/443 + + Sheet.96 + Port 80/443 + + + + Port 80/443 + + Sheet.127 + Port 5432 + + + + Port 5432 + + Sheet.20 + Port 5432 + + + + Port 5432 + + Sheet.126 + Port 5432 + + + + Port 5432 + + Sheet.75 + + + + Sheet.97 + + + + Sheet.42 + Port 80/443 + + + + Port 80/443 + + Sheet.62 + + + + Sheet.26 + + + + Sheet.76 + Platform gateway + + + + Platform gateway + + Sheet.85 + Port80/443 + + + + Port80/443 + + Sheet.87 + Port80/443 + + + + Port80/443 + + Sheet.91 + Port80/443 + + + + Port80/443 + + Sheet.101 + Port80/443 + + + + Port80/443 + + Sheet.201 + + + + Sheet.124 + + + + Sheet.125 + + + + Sheet.32 + + + + Sheet.50 + Port 16379 + + + + Port 16379 + + Sheet.202 + + + + Sheet.133 + + + + Sheet.137 + Port 50051 + + + + Port 50051 + + Sheet.83 + Port80/443 + + + + Port80/443 + + Sheet.77 + Port80/443 + + + + Port80/443 + + diff --git a/downstream/images/svg/st_RPM_B_Env_B.svg b/downstream/images/svg/st_RPM_B_Env_B.svg new file mode 100644 index 0000000000..e5c825bfea --- /dev/null +++ b/downstream/images/svg/st_RPM_B_Env_B.svg @@ -0,0 +1,785 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Page-1 + + + Sheet.25 + + + + Sheet.24 + + + + Sheet.67 + + + + Sheet.2 + Ansible Automation Platform + + + + Ansible Automation Platform + + Sheet.5 + + + + Sheet.9 + Execution node + + + + Execution node + + Sheet.10 + Automation mesh + + + + Automation mesh + + Sheet.11 + + + + Sheet.51 + Hop node + + + + Hop node + + Sheet.52 + + + + Sheet.18 + Port 27199 + + + + Port 27199 + + Sheet.27 + Port 27199 + + + + Port 27199 + + Sheet.13 + Execution node + + + + Execution node + + Sheet.7 + + + + Sheet.62 + Automation hub 2.4 x + + + + Automation hub 2.4 x + + Sheet.88 + Automation hub VM + + Sheet.64 + + + + + + Automation hub VM + + + Sheet.89 + Automation hub VM + + Sheet.63 + + + + + + Automation hub VM + + + Sheet.80 + + + + Sheet.81 + + + + Sheet.82 + + + + Sheet.83 + + + + Sheet.84 + + + + Sheet.86 + 80/443 http(s) ingress + + + + 80/443 http(s) ingress + + Sheet.90 + PostgreSQL: 5432-database + + + + PostgreSQL: 5432-database + + Sheet.91 + Redis: (1)6379 – job control/caching + + + + Redis: (1)6379 – job control/caching + + Sheet.92 + Receptor:27199 – work/job 
execution + + + + Receptor:27199 – work/job execution + + Sheet.97 + + + + Sheet.3 + + + + Sheet.96 + Port 80/443 + + + + Port 80/443 + + Sheet.41 + HA proxy/ load balancer + + + + HA proxy/ load balancer + + Sheet.42 + + + + Sheet.65 + + + + Sheet.46 + Port 27199 + + + + Port 27199 + + Sheet.102 + + + + Sheet.1 + + Sheet.8 + + + + Sheet.12 + + + + Sheet.17 + + + + Sheet.19 + + + + Sheet.22 + + + + Sheet.85 + + + + + Sheet.29 + + Sheet.55 + + + + Sheet.87 + Event-Driven ansible + + + + Event-Driven ansible + + Sheet.100 + + + + Sheet.101 + + + + Sheet.103 + + + + Sheet.122 + Event-Driven ansible VM + + + + Event-Driven ansible VM + + Sheet.123 + redis + + + + redis + + Sheet.124 + redis + + + + redis + + Sheet.125 + redis + + + + redis + + Sheet.126 + Event-Driven ansible VM + + + + Event-Driven ansible VM + + Sheet.127 + Event-Driven ansible VM + + + + Event-Driven ansible VM + + + Sheet.56 + + Sheet.6 + + + + Sheet.15 + Platform gateway + + + + Platform gateway + + Sheet.23 + + + + Sheet.30 + + + + Sheet.105 + + + + Sheet.106 + Platform gateway VM + + + + Platform gateway VM + + Sheet.107 + redis + + + + redis + + Sheet.57 + redis + + + + redis + + Sheet.58 + redis + + + + redis + + Sheet.28 + Platform gateway VM + + + + Platform gateway VM + + Sheet.33 + Platform gateway VM + + + + Platform gateway VM + + + Sheet.4 + + + + Sheet.14 + Automation controller 2.4 x + + + + Automation controller 2.4 x + + Sheet.21 + + + + Sheet.16 + Automation controller VM + + + Automation controller VM + + Sheet.32 + + + + Sheet.31 + Automation controller VM + + + Automation controller VM + + Sheet.118 + + + + Sheet.115 + + + + Sheet.35 + + + + Sheet.44 + + + + Sheet.43 + + + + Sheet.95 + + + + Sheet.59 + + + + Sheet.108 + + + + Sheet.53 + + + + Sheet.54 + + + + Sheet.109 + Port 16379 + + + + Port 16379 + + Sheet.60 + Port 80/443 + + + + Port 80/443 + + Sheet.110 + + + + Sheet.61 + + + + Sheet.112 + Port 16379 + + + + Port 16379 + + Sheet.66 + + + + Sheet.26 + + + + Sheet.104 + Port 80/443 + + + + Port 80/443 + + Sheet.73 + + + + Sheet.74 + + + + Sheet.99 + Port 5432 + + + + Port 5432 + + Sheet.20 + Port 5432 + + + + Port 5432 + + Sheet.45 + Port 5432 + + + + Port 5432 + + Sheet.128 + + + + Sheet.40 + Port 80/443 + + + + Port 80/443 + + Sheet.129 + + + + Sheet.130 + + + + Sheet.132 + + + + Sheet.133 + Port80/443 + + + + Port80/443 + + Sheet.134 + Port80/443 + + + + Port80/443 + + Sheet.136 + + + + Sheet.68 + + + + Sheet.70 + + + + Sheet.38 + Port 5432 + + + + Port 5432 + + Sheet.137 + + + + Sheet.138 + + + + Sheet.69 + Postgres external database + + + + Postgres external database + + Sheet.139 + + + + Sheet.37 + + + + Sheet.98 + + + + Sheet.36 + Port 80/443 + + + + Port 80/443 + + Sheet.39 + + + + Sheet.113 + + + + Sheet.47 + + + + Sheet.114 + + + + Sheet.140 + + + + Sheet.131 + + + + Sheet.141 + + + + Sheet.72 + + + + Sheet.135 + Port443 + + + + Port443 + + Sheet.142 + + + + Sheet.94 + + + + Sheet.34 + Port 6379 + + + + Port 6379 + + Sheet.116 + Port 6379 + + + + Port 6379 + + diff --git a/downstream/images/system-settings-full.png b/downstream/images/system-settings-full.png new file mode 100644 index 0000000000..c949e1f31d Binary files /dev/null and b/downstream/images/system-settings-full.png differ diff --git a/downstream/images/system-settings-page.png b/downstream/images/system-settings-page.png new file mode 100644 index 0000000000..c9c5e8c5e9 Binary files /dev/null and b/downstream/images/system-settings-page.png differ diff --git a/downstream/images/system_settings_page.png 
b/downstream/images/system_settings_page.png new file mode 100644 index 0000000000..ed329b561f Binary files /dev/null and b/downstream/images/system_settings_page.png differ diff --git a/downstream/images/troubleshooting_options.png b/downstream/images/troubleshooting_options.png new file mode 100644 index 0000000000..6dc2374af7 Binary files /dev/null and b/downstream/images/troubleshooting_options.png differ diff --git a/downstream/images/ug-jobs-events-summary.png b/downstream/images/ug-jobs-events-summary.png index aa7d41ca78..8d230009f4 100644 Binary files a/downstream/images/ug-jobs-events-summary.png and b/downstream/images/ug-jobs-events-summary.png differ diff --git a/downstream/images/ug-schedules-sample-list.png b/downstream/images/ug-schedules-sample-list.png index 3d428b8ed5..b0e4022e7d 100644 Binary files a/downstream/images/ug-schedules-sample-list.png and b/downstream/images/ug-schedules-sample-list.png differ diff --git a/downstream/images/ug-scm-project-branching-emphasized.png b/downstream/images/ug-scm-project-branching-emphasized.png index 0936e3d2ca..84041d07da 100644 Binary files a/downstream/images/ug-scm-project-branching-emphasized.png and b/downstream/images/ug-scm-project-branching-emphasized.png differ diff --git a/downstream/images/ug-wf-add-template.png b/downstream/images/ug-wf-add-template.png index f048be3d74..f29ad85ca6 100644 Binary files a/downstream/images/ug-wf-add-template.png and b/downstream/images/ug-wf-add-template.png differ diff --git a/downstream/images/ug-wf-approval-node.png b/downstream/images/ug-wf-approval-node.png index b37e860dd9..3f7c4bc04a 100644 Binary files a/downstream/images/ug-wf-approval-node.png and b/downstream/images/ug-wf-approval-node.png differ diff --git a/downstream/images/ug-wf-create-sibling-node.png b/downstream/images/ug-wf-create-sibling-node.png index 1e064e7352..9f13f87cfd 100644 Binary files a/downstream/images/ug-wf-create-sibling-node.png and b/downstream/images/ug-wf-create-sibling-node.png differ diff --git a/downstream/images/ug-wf-dropdown-list.png b/downstream/images/ug-wf-dropdown-list.png index c9c0196816..95c10fca50 100644 Binary files a/downstream/images/ug-wf-dropdown-list.png and b/downstream/images/ug-wf-dropdown-list.png differ diff --git a/downstream/images/ug-wf-editor-convergent-node-all.png b/downstream/images/ug-wf-editor-convergent-node-all.png index 8415682c5f..cb7bf6d340 100644 Binary files a/downstream/images/ug-wf-editor-convergent-node-all.png and b/downstream/images/ug-wf-editor-convergent-node-all.png differ diff --git a/downstream/images/user_preferences_page.png b/downstream/images/user_preferences_page.png new file mode 100644 index 0000000000..c77ef8abd7 Binary files /dev/null and b/downstream/images/user_preferences_page.png differ diff --git a/downstream/images/vscode-extensions-icon.png b/downstream/images/vscode-extensions-icon.png new file mode 100644 index 0000000000..2e693328ed Binary files /dev/null and b/downstream/images/vscode-extensions-icon.png differ diff --git a/downstream/images/vscode-remote-icon.png b/downstream/images/vscode-remote-icon.png new file mode 100644 index 0000000000..473846b34a Binary files /dev/null and b/downstream/images/vscode-remote-icon.png differ diff --git a/downstream/images/workflow.png b/downstream/images/workflow.png new file mode 100644 index 0000000000..d0765e76d5 Binary files /dev/null and b/downstream/images/workflow.png differ diff --git a/downstream/modules/aap-hardening/.platform b/downstream/modules/aap-hardening/.platform new file 
mode 120000 index 0000000000..1d58796b7d --- /dev/null +++ b/downstream/modules/aap-hardening/.platform @@ -0,0 +1 @@
+../platform
\ No newline at end of file
diff --git a/downstream/modules/aap-hardening/con-aap-additional-software.adoc b/downstream/modules/aap-hardening/con-aap-additional-software.adoc index a60ec75177..40b778fa1e 100644 --- a/downstream/modules/aap-hardening/con-aap-additional-software.adoc +++ b/downstream/modules/aap-hardening/con-aap-additional-software.adoc @@ -7,6 +7,9 @@ [role="_abstract"]
-When installing the {PlatformNameShort} components on {RHEL} servers, the {RHEL} servers should be dedicated to that use alone. Additional server capabilities should not be installed in addition to {PlatformNameShort}, as this is an unsupported configuration and may affect the security and performance of the {PlatformNameShort} software.
+When installing the {PlatformNameShort} components on {RHEL} servers, the {RHEL} servers should be dedicated to that use alone.
+Additional server capabilities must not be installed in addition to {PlatformNameShort}, as this is an unsupported configuration and might affect the security and performance of the {PlatformNameShort} software.
-Similarly, when {PlatformNameShort} is deployed on a {RHEL} host, it installs software like the nginx web server, the Pulp software repository, and the PostgreSQL database server. This software should not be modified or used in a more generic fashion (for example, do not use nginx to server additional website content or PostgreSQL to host additional databases) as this is an unsupported configuration and may affect the security and performance of {PlatformNameShort}. The configuration of this software is managed by the {PlatformNameShort} installer, and any manual changes might be undone when performing upgrades.
\ No newline at end of file
+Similarly, when {PlatformNameShort} is deployed on a {RHEL} host, it installs software like the nginx web server, the Pulp software repository, and the PostgreSQL database server (unless a user-provided external database is used).
+This software should not be modified or used in a more generic fashion (for example, do not use nginx to serve additional website content or PostgreSQL to host additional databases) as this is an unsupported configuration and might affect the security and performance of {PlatformNameShort}.
+The configuration of this software is managed by the {PlatformNameShort} installer, and any manual changes might be undone when performing upgrades.
\ No newline at end of file
diff --git a/downstream/modules/aap-hardening/con-automation-use-secrets.adoc b/downstream/modules/aap-hardening/con-automation-use-secrets.adoc index 7b9ae4c4f5..3c74d908ea 100644 --- a/downstream/modules/aap-hardening/con-automation-use-secrets.adoc +++ b/downstream/modules/aap-hardening/con-automation-use-secrets.adoc @@ -15,8 +15,16 @@ You can grant users and teams the ability to use these credentials without actually exposing the credential to the user. This means that if a user moves to a different team or leaves the organization, you don’t have to re-key all of your systems.
-{ControllerName} uses SSH (or the Windows equivalent) to connect to remote hosts. To pass the key from the {ControllerName} to SSH, the key must be decrypted before it can be written to a named pipe. {ControllerNameStart} then uses that pipe to send the key to SSH (so that it is never written to disk). If passwords are used, the {ControllerName} handles those by responding directly to the password prompt and decrypting the password before writing it to the prompt.
+{ControllerNameStart} uses SSH (or the Windows equivalent) to connect to remote hosts.
+To pass the key from {ControllerName} to SSH, the key must be decrypted before it can be written to a named pipe.
+{ControllerNameStart} then uses that pipe to send the key to SSH (so that it is never written to disk).
+If passwords are used, {ControllerName} handles those by responding directly to the password prompt and decrypting the password before writing it to the prompt.
As an administrator with superuser access, you can define a custom credential type in a standard format using a YAML/JSON-like definition, enabling the assignment of new credential types to jobs and inventory updates. This enables you to define a custom credential type that works in ways similar to existing credential types. For example, you can create a custom credential type that injects an API token for a third-party web service into an environment variable, which your playbook or custom inventory script can consume.
-To encrypt secret fields, {PlatformNameShort} uses AES in CBC mode with a 256-bit key for encryption, PKCS7 padding, and HMAC using SHA256 for authentication. The encryption/decryption process derives the AES-256 bit encryption key from the `SECRET_KEY`, the field name of the model field, and the database-assigned auto-incremented record ID. Thus, if any attribute used in the key generation process changes, {PlatformNameShort} fails to correctly decrypt the secret. {PlatformNameShort} is designed such that the `SECRET_KEY` is never readable in playbooks {PlatformNameShort} launches, so that these secrets are never readable by {PlatformNameShort} users, and no secret field values are ever made available through the {PlatformNameShort} REST API. If a secret value is used in a playbook, you must use `no_log` on the task so that it is not accidentally logged. For more information, see link:https://docs.ansible.com/ansible/latest/reference_appendices/logging.html#protecting-sensitive-data-with-no-log[Protecting sensitive data with no log].
+To encrypt secret fields, {PlatformNameShort} uses the _Advanced Encryption Standard_ (AES) in _Cipher Block Chaining_ (CBC) mode with a 256-bit key for encryption, _Public-Key Cryptography Standards_ (PKCS7) padding, and _Hash-Based Message Authentication Code_ (HMAC) using SHA256 for authentication.
+The encryption/decryption process derives the 256-bit AES encryption key from the `SECRET_KEY`, the field name of the model field, and the database-assigned auto-incremented record ID.
+Thus, if any attribute used in the key generation process changes, {PlatformNameShort} fails to correctly decrypt the secret.
+{PlatformNameShort} is designed such that the `SECRET_KEY` is never readable in playbooks {PlatformNameShort} launches.
+This means that these secrets are never readable by {PlatformNameShort} users, and no secret field values are ever made available through the {PlatformNameShort} REST API.
+If a secret value is used in a playbook, you must use `no_log` on the task so that it is not accidentally logged. For more information, see link:https://docs.ansible.com/ansible/latest/reference_appendices/logging.html#protecting-sensitive-data-with-no-log[Protecting sensitive data with no log].
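For illustration, a custom credential type that injects such an API token might look like the following sketch; the field name, environment variable, and endpoint are invented for this example and are not part of the product interface. The inputs and injectors sections are entered separately when defining the credential type:

[source,yaml]
----
# Hypothetical "inputs" definition: marking the field as secret encrypts
# the stored value at rest and masks it in the UI.
fields:
  - id: api_token
    type: string
    label: Example API token
    secret: true
required:
  - api_token
---
# Hypothetical "injectors" definition: the decrypted token is exposed to
# the job environment only for the duration of the job run.
env:
  EXAMPLE_API_TOKEN: '{{ api_token }}'
----

A playbook task can then consume the injected variable, combining it with `no_log` so the token never appears in job output:

[source,yaml]
----
- name: Query an example third-party service with the injected token
  ansible.builtin.uri:
    url: https://api.example.com/v1/status  # placeholder endpoint
    headers:
      Authorization: "Bearer {{ lookup('ansible.builtin.env', 'EXAMPLE_API_TOKEN') }}"
  no_log: true  # keep the Authorization header out of logged output
----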
diff --git a/downstream/modules/aap-hardening/con-benefits-of-patch-automation.adoc b/downstream/modules/aap-hardening/con-benefits-of-patch-automation.adoc new file mode 100644 index 0000000000..ae1e776644 --- /dev/null +++ b/downstream/modules/aap-hardening/con-benefits-of-patch-automation.adoc @@ -0,0 +1,10 @@ +[id="con-benefits-of-patch-automation"] + += Benefits of patch automation + +Automating the patching process provides a number of benefits: + +* Reduces error-prone manual effort. +* Decreases time to deploy patches at scale. +* Ensures consistency of patches across similar systems. Manual patching of similar systems can result in human error (forgetting one or more, patching using different versions) that impacts consistency. +* Enables orchestration of complex patching scenarios where an update might require taking a system snapshot before applying a patch, or might require additional configuration changes when the patch is applied. diff --git a/downstream/modules/aap-hardening/con-compliance-profile-considerations.adoc b/downstream/modules/aap-hardening/con-compliance-profile-considerations.adoc new file mode 100644 index 0000000000..f8c7f601d2 --- /dev/null +++ b/downstream/modules/aap-hardening/con-compliance-profile-considerations.adoc @@ -0,0 +1,9 @@ +[id="con-compliance-profile-considerations"] + += Compliance profile considerations + +In many environments, {PlatformNameShort} might need to be installed on {RHEL} systems where security controls have been applied to meet the requirements of a compliance profile such as CIS Critical Security Controls, _Payment Card Industry/Data Security Standard_ (PCI/DSS), the DISA STIG, or a similar profile. +In these environments, there are a specific set of security controls that might need to be modified for {PlatformNameShort} to run properly. +Apply any compliance profile controls to the {RHEL} servers being used for {PlatformNameShort} before installation, and then modify the following security controls as required. + +In environments where these controls are required, discuss waiving the controls with your security auditor. diff --git a/downstream/modules/aap-hardening/con-credential-management-planning.adoc b/downstream/modules/aap-hardening/con-credential-management-planning.adoc index 31667f2ea3..45ffd2c9a2 100644 --- a/downstream/modules/aap-hardening/con-credential-management-planning.adoc +++ b/downstream/modules/aap-hardening/con-credential-management-planning.adoc @@ -7,14 +7,20 @@ [role="_abstract"] -{ControllerNameStart} uses credentials to authenticate requests to jobs against machines, synchronize with inventory sources, and import project content from a version control system. {ControllerNameStart} manages three sets of secrets: +{PlatformName} uses credentials to authenticate requests to jobs against machines, synchronize with inventory sources, and import project content from a version control system. {ControllerNameStart} manages three sets of secrets: -* User passwords for *local automation controller users*. See the xref:con-user-authentication-planning_{context}[User Authentication Planning] section of this guide for additional details. -* Secrets for automation controller *operational use* (database password, message bus password, and so on). +* User passwords for *local {PlatformNameShort} users*. +//See the xref:con-user-authentication-planning_{context}[User Authentication Planning] section of this guide for additional details. 
+* Secrets for {PlatformNameShort} *operational use* (database password, message bus password, and so on).
* Secrets for *automation use* (SSH keys, cloud credentials, external password vault credentials, and so on).
Implementing a privileged access or credential management solution to protect credentials from compromise is a highly recommended practice. Organizations should audit the use of, and provide additional programmatic control over, access and privilege escalation.
-You can further secure automation credentials by ensuring they are unique and stored only in {ControllerName}. Services such as OpenSSH can be configured to allow credentials on connections only from specific addresses. Use different credentials for automation from those used by system administrators to log into a server. Although direct access should be limited where possible, it can be used for disaster recovery or other ad-hoc management purposes, allowing for easier auditing.
+You can further secure automation credentials by ensuring they are unique and stored only in {ControllerName}.
+Services such as OpenSSH can be configured to allow credentials on connections only from specific addresses.
+Use different credentials for automation from those used by system administrators to log in to a server.
+Although direct access should be limited where possible, it can be used for disaster recovery or other ad-hoc management purposes, allowing for easier auditing.
-Different automation jobs might need to access a system at different levels. For example, you can have low-level system automation that applies patches and performs security baseline checking, while a higher-level piece of automation deploys applications. By using different keys or credentials for each piece of automation, the effect of any one key vulnerability is minimized. This also allows for easy baseline auditing.
+Different automation jobs might need to access a system at different levels.
+For example, you can have low-level system automation that applies patches and performs security baseline checking, while a higher-level piece of automation deploys applications.
+By using different keys or credentials for each piece of automation, the effect of any one key vulnerability is minimized. This also allows for easy baseline auditing.
diff --git a/downstream/modules/aap-hardening/con-day-two-operations.adoc b/downstream/modules/aap-hardening/con-day-two-operations.adoc index f7e587847b..bbb3efb314 100644 --- a/downstream/modules/aap-hardening/con-day-two-operations.adoc +++ b/downstream/modules/aap-hardening/con-day-two-operations.adoc @@ -7,4 +7,4 @@ [role="_abstract"]
-Day 2 Operations include Cluster Health and Scaling Checks, including Host, Project, and environment level Sustainment. You should continually analyze configuration and security drift.
\ No newline at end of file
+Day 2 operations include cluster health and scaling checks, including host, project, and environment-level sustainment. You must continually analyze configuration and security drift.
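One minimal sketch of continuous drift analysis is a scheduled check-mode run; the inventory group and hardening role named below are placeholders for whatever baseline your organization applies:

[source,yaml]
----
- name: Report configuration drift without changing anything
  hosts: aap_infrastructure   # placeholder inventory group
  become: true
  check_mode: true            # tasks report what would change; nothing is modified
  tasks:
    - name: Re-apply the security baseline in check mode
      ansible.builtin.include_role:
        name: example.hardening.baseline   # placeholder hardening role
----

Scheduling this play as a recurring job and alerting on "changed" results turns drift analysis into a routine day-two activity.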
\ No newline at end of file diff --git a/downstream/modules/aap-hardening/con-deployment-methods.adoc b/downstream/modules/aap-hardening/con-deployment-methods.adoc new file mode 100644 index 0000000000..a1a88ec063 --- /dev/null +++ b/downstream/modules/aap-hardening/con-deployment-methods.adoc @@ -0,0 +1,16 @@ +[id="con-deployment-methods"] + += {PlatformName} deployment methods + +There are three different installation methods for {PlatformNameShort}: + +* RPM-based on {RHEL} +* Container-based on {RHEL} +* Operator-based on {OCP} + +This document offers guidance on hardening {PlatformNameShort} when installed using either of the first two installation methods (RPM-based or container-based). +This document further recommends using the container-based installation method for new deployments, as the RPM-based installer will be deprecated in a future release. + +For further information, see link:{URLReleaseNotes}/aap-2.5-deprecated-features#aap-2.5-deprecated-features[Deprecated features]. + +Operator-based deployments are not described in this document. \ No newline at end of file diff --git a/downstream/modules/aap-hardening/con-external-credential-vault.adoc b/downstream/modules/aap-hardening/con-external-credential-vault.adoc index 39db35c1e8..13a05ea2c3 100644 --- a/downstream/modules/aap-hardening/con-external-credential-vault.adoc +++ b/downstream/modules/aap-hardening/con-external-credential-vault.adoc @@ -7,8 +7,11 @@ [role="_abstract"] -Secrets management is an essential component of maintaining a secure automation platform. We recommend the following secrets management practices: +Secrets management is an essential component of maintaining a secure automation platform. +We recommend the following secrets management practices: -* Ensure that there are no unauthorized users with access to the system, and ensure that only users who require access are granted it. {ControllerNameStart} encrypts sensitive information such as passwords and API tokens, but also stores the key to decryption. Authorized users potentially have access to everything. +* Ensure that there are no unauthorized users with access to the system, and ensure that only users who require access are granted it. +{ControllerNameStart} encrypts sensitive information such as passwords and API tokens, but also stores the key to decryption. +Authorized users potentially have access to everything. -* Use an external system to manage secrets. In cases where credentials need to be updated, an external system can retrieve updated credentials with less complexity than an internal system. External systems for managing secrets include CyberArk, HashiCorp Vault, {Azure} Key Management, and others. For more information, see the link:https://docs.ansible.com/automation-controller/4.4/html/userguide/credential_plugins.html#secret-management-system[Secret Management System] section of the {ControllerUG} v4.4. \ No newline at end of file +* Use an external system to manage secrets. In cases where credentials need to be updated, an external system can retrieve updated credentials with less complexity than an internal system. External systems for managing secrets include CyberArk, HashiCorp Vault, {Azure} Key Management, and others. For more information, see the link:https://docs.ansible.com/automation-controller/4.4/html/userguide/credential_plugins.html#secret-management-system[Secret Management System] section of {ControllerUG}. 
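As one hedged illustration of runtime secret retrieval (distinct from the controller's credential plugin integration described above), a playbook can query HashiCorp Vault directly. This sketch assumes the `community.hashi_vault` collection is installed; the Vault URL, secret path, and token variable are placeholders:

[source,yaml]
----
- name: Fetch a database password from an external vault (illustrative only)
  ansible.builtin.set_fact:
    db_password: >-
      {{ lookup('community.hashi_vault.hashi_vault',
                'secret=secret/data/example/app:db_password',
                url='https://vault.example.com:8200',
                token=vault_token) }}
  no_log: true  # keep the retrieved secret out of job output
----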
\ No newline at end of file diff --git a/downstream/modules/aap-hardening/con-install-secure-host.adoc b/downstream/modules/aap-hardening/con-install-secure-host.adoc index fd23a45dc8..4162a1121a 100644 --- a/downstream/modules/aap-hardening/con-install-secure-host.adoc +++ b/downstream/modules/aap-hardening/con-install-secure-host.adoc @@ -7,8 +7,14 @@ [role="_abstract"] -The {PlatformNameShort} installer can be run from one of the infrastructure servers, such as an {ControllerName}, or from an external system that has SSH access to the {PlatformNameShort} infrastructure servers. The {PlatformNameShort} installer is also used not just for installation, but for subsequent day-two operations, such as backup and restore, as well as upgrades. This guide recommends performing installation and day-two operations from a dedicated external server, hereafter referred to as the installation host. Doing so eliminates the need to log in to one of the infrastructure servers to run these functions. The installation host must only be used for management of {PlatformNameShort} and must not run any other services or software. +The {PlatformNameShort} installer can be run from one of the infrastructure servers, such as an {ControllerName}, or from an external system that has SSH access to the {PlatformNameShort} infrastructure servers. +The {PlatformNameShort} installer is also used not just for installation, but for subsequent day-two operations, such as backup and restore, as well as upgrades. +This guide recommends performing installation and day-two operations from a dedicated external server, hereafter referred to as the installation host. +Doing so eliminates the need to log in to one of the infrastructure servers to run these functions. The installation host must only be used for management of {PlatformNameShort} and must not run any other services or software. -The installation host must be a {RHEL} server that has been installed and configured in accordance with link:{BaseURL}/red_hat_enterprise_linux/8/html/security_hardening/index[Security hardening for Red Hat Enterprise Linux] and any security profile requirements relevant to your organization (CIS, STIG, and so on). Obtain the {PlatformNameShort} installer as described in the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/red_hat_ansible_automation_platform_planning_guide/index#choosing_and_obtaining_a_red_hat_ansible_automation_platform_installer[Automation Platform Planning Guide], and create the installer inventory file as describe in the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/red_hat_ansible_automation_platform_installation_guide/index#proc-editing-installer-inventory-file_platform-install-scenario[Automation Platform Installation Guide]. This inventory file is used for upgrades, adding infrastructure components, and day-two operations by the installer, so preserve the file after installation for future operational use. +The installation host must be a {RHEL} server that has been installed and configured in accordance with link:{BaseURL}/red_hat_enterprise_linux/9/html/security_hardening/index[Security hardening for Red Hat Enterprise Linux] and any security profile requirements relevant to your organization (CIS, STIG, and so on). 
+Obtain the {PlatformNameShort} installer as described in the link:{URLPlanningGuide}/choosing_and_obtaining_a_red_hat_ansible_automation_platform_installer[Planning your installation], and create the installer inventory file as described in the link:{URLInstallationGuide}/assembly-platform-install-scenario#proc-editing-installer-inventory-file_platform-install-scenario[Editing the Red Hat Ansible Automation Platform installer inventory file]. +This inventory file is used for upgrades, adding infrastructure components, and day-two operations by the installer, so preserve the file after installation for future operational use. -Access to the installation host must be restricted only to those personnel who are responsible for managing the {PlatformNameShort} infrastructure. Over time, it will contain sensitive information, such as the installer inventory (which contains the initial login credentials for {PlatformNameShort}), copies of user-provided PKI keys and certificates, backup files, and so on. The installation host must also be used for logging in to the {PlatformNameShort} infrastructure servers through SSH when necessary for infrastructure management and maintenance. +Access to the installation host must be restricted only to those personnel who are responsible for managing the {PlatformNameShort} infrastructure. +Over time, it will contain sensitive information, such as the installer inventory (which contains the initial login credentials for {PlatformNameShort}), copies of user-provided PKI keys and certificates, backup files, and so on. The installation host must also be used for logging in to the {PlatformNameShort} infrastructure servers through SSH when necessary for infrastructure management and maintenance. diff --git a/downstream/modules/aap-hardening/con-installation.adoc b/downstream/modules/aap-hardening/con-installation.adoc index 64589fd3c9..ed4d5fcaa8 100644 --- a/downstream/modules/aap-hardening/con-installation.adoc +++ b/downstream/modules/aap-hardening/con-installation.adoc @@ -7,4 +7,6 @@ [role="_abstract"] -There are installation-time decisions that affect the security posture of {PlatformNameShort}. The installation process includes setting a number of variables, some of which are relevant to the hardening of the {PlatformNameShort} infrastructure. Before installing {PlatformNameShort}, consider the guidance in the installation section of this guide. \ No newline at end of file +There are installation-time decisions that affect the security posture of {PlatformNameShort}. +The installation process includes setting a number of variables, some of which are relevant to the hardening of the {PlatformNameShort} infrastructure. +Before installing {PlatformNameShort}, consider the guidance in the installation section of this guide. \ No newline at end of file diff --git a/downstream/modules/aap-hardening/con-logging-log-capture.adoc b/downstream/modules/aap-hardening/con-logging-log-capture.adoc index 230010399b..2deb4a45a7 100644 --- a/downstream/modules/aap-hardening/con-logging-log-capture.adoc +++ b/downstream/modules/aap-hardening/con-logging-log-capture.adoc @@ -7,8 +7,22 @@ [role="_abstract"] -Visibility and analytics is an important pillar of Enterprise Security and Zero Trust Architecture. Logging is key to capturing actions and auditing. 
You can manage logging and auditing by using the built-in audit support described in the link:{BaseURL}/red_hat_enterprise_linux/9/html/security_hardening/auditing-the-system_security-hardening[Auditing the system] section of the Security hardening for {RHEL} guide. Controller's built-in logging and activity stream support {ControllerName} logs all changes within {ControllerName} and automation logs for auditing purposes. More detailed information is available in the link:https://docs.ansible.com/automation-controller/latest/html/administration/logging.html[Logging and Aggregation] section of the {ControllerName} documentation.
+Visibility and analytics is an important pillar of Enterprise Security and Zero Trust Architecture.
+Logging is key to capturing actions and auditing.
+You can manage logging and auditing by using the built-in audit support described in the link:{BaseURL}/red_hat_enterprise_linux/9/html/security_hardening/auditing-the-system_security-hardening[Auditing the system] section of the Security hardening for {RHEL} guide.
+{PlatformNameShort}'s built-in logging and activity stream logs all changes within {PlatformName}, as well as automation logs, for auditing purposes.
+More detailed information is available in the link:{URLControllerAdminGuide}/assembly-controller-logging-aggregation[Logging and Aggregation] section of {TitleControllerAdminGuide}.
-This guide recommends that you configure {PlatformNameShort} and the underlying {RHEL} systems to collect logging and auditing centrally, rather than reviewing it on the local system. {ControllerNameStart} must be configured to use external logging to compile log records from multiple components within the controller server. The events occurring must be time-correlated to conduct accurate forensic analysis. This means that the controller server must be configured with an NTP server that is also used by the logging aggregator service, as well as the targets of the controller. The correlation must meet certain industry tolerance requirements. In other words, there might be a varying requirement that time stamps of different logged events must not differ by any amount greater than X seconds. This capability should be available in the external logging service.
+This guide recommends that you configure {PlatformNameShort} and the underlying {RHEL} systems to collect logging and auditing centrally, rather than reviewing it on the local system.
+{PlatformNameShort} must be configured to use external logging to compile log records from multiple components within the {PlatformNameShort} server.
+The events occurring must be time-correlated to conduct accurate forensic analysis.
+This means that the {PlatformNameShort} server must be configured with an NTP server that is also used by the logging aggregator service, as well as the targets of {PlatformNameShort}.
+The correlation must meet certain industry tolerance requirements.
+In other words, there might be a varying requirement that time stamps of different logged events must not differ by any amount greater than _x_ seconds.
+This capability should be available in the external logging service.
-Another critical capability of logging is the ability to use cryptography to protect the integrity of log tools. Log data includes all information (for example, log records, log settings, and log reports) needed to successfully log information system activity. It is common for attackers to replace the log tools or inject code into the existing tools to hide or erase system activity from the logs.
To address this risk, log tools must be cryptographically signed so that you can identify when the log tools have been modified, manipulated, or replaced. For example, one way to validate that the log tool(s) have not been modified, manipulated or replaced is to use a checksum hash against the tool file(s). This ensures the integrity of the tool(s) has not been compromised. +Another critical capability of logging is the ability to use cryptography to protect the integrity of log tools. Log data includes all information (for example, log records, log settings, and log reports) needed to successfully log information system activity. +It is common for attackers to replace the log tools or inject code into the existing tools to hide or erase system activity from the logs. +To address this risk, log tools must be cryptographically signed so that you can identify when the log tools have been modified, manipulated, or replaced. +For example, one way to validate that the log tool(s) have not been modified, manipulated or replaced is to use a checksum hash against the tool file(s). +This ensures the integrity of the tool(s) has not been compromised. diff --git a/downstream/modules/aap-hardening/con-network-firewall-services-planning.adoc b/downstream/modules/aap-hardening/con-network-firewall-services-planning.adoc index ba5de6ccfd..c5b784b744 100644 --- a/downstream/modules/aap-hardening/con-network-firewall-services-planning.adoc +++ b/downstream/modules/aap-hardening/con-network-firewall-services-planning.adoc @@ -3,20 +3,24 @@ [id="con-network-firewall-services_{context}"] -= Network, firewall, and network services planning for {PlatformNameShort} +//= Network, firewall, and network services planning for {PlatformNameShort} -[role="_abstract"] +//[role="_abstract"] -{PlatformNameShort} requires access to a network to integrate to external auxiliary services and to manage target environments and resources such as hosts, other network devices, applications, cloud services. The link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/red_hat_ansible_automation_platform_planning_guide/index#ref-network-ports-protocols_planning[network ports and protocols] section of the {PlatformNameShort} planning guide describes how {PlatformNameShort} components interact on the network as well as which ports and protocols are used, as shown in the following diagram: +//{PlatformNameShort} requires access to a network to integrate to external auxiliary services and to manage target environments and resources such as hosts, other network devices, applications, cloud services. +//The link:{URLPlanningGuide}?ref-network-ports-protocols_planning[network ports and protocols] section of {TitlePlanningGuide} describes how {PlatformNameShort} components interact on the network as well as which ports and protocols are used, as shown in the following diagram: -.{PlatformNameShort} Network ports and protocols -image::aap-network-ports-protocols.png[Interaction of {PlatformNameShort} components on the network with information about the ports and protocols that are used.] +//.{PlatformNameShort} Network ports and protocols +//image::aap-network-ports-protocols.png[Interaction of {PlatformNameShort} components on the network with information about the ports and protocols that are used.] 
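+As an illustration of the kind of change this planning informs, the following is a minimal sketch (not from the product documentation) of opening the standard HTTPS port with `firewalld` on a {RHEL} host; confirm the actual port list against the network ports documentation referenced in this section:
+
+----
+$ sudo firewall-cmd --permanent --add-service=https
+$ sudo firewall-cmd --reload
+----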
-When planning firewall or cloud network security group configurations related to {PlatformNameShort}, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/red_hat_ansible_automation_platform_planning_guide/index#ref-network-ports-protocols_planning[Network ports and protocols] section of the {PlatformNameShort} Planning Guide to understand what network ports need to be opened on a firewall or security group. +When planning firewall or cloud network security group configurations related to {PlatformNameShort}, see the +"Network Ports" section of your chosen topology in link:{LinkTopologies} +//link:{URLPlanningGuide}?ref-network-ports-protocols_planning[network ports and protocols] section of {TitlePlanningGuide} +to understand what network ports need to be opened on a firewall or security group. -For more information on using a load balancer, and for outgoing traffic requirements for services compatible with {PlatformNameShort}. Consult the Red Hat Knowledgebase article link:https://access.redhat.com/solutions/6756251[What ports need to be opened in the firewall for {PlatformNameShort} 2 Services?]. For internet-connected systems, this article also defines the outgoing traffic requirements for services that {PlatformNameShort} can be configured to use, such as {HubNameMain}, {InsightsName}, {Galaxy}, the registry.redhat.io container image registry, and so on. +//For more information on using a load balancer, and for outgoing traffic requirements for services compatible with {PlatformNameShort}. Consult the Red Hat Knowledgebase article link:https://access.redhat.com/solutions/6756251[What ports need to be opened in the firewall for {PlatformNameShort} 2 Services?]. For internet-connected systems, this article also defines the outgoing traffic requirements for services that {PlatformNameShort} can be configured to use, such as {HubNameMain}, {InsightsName}, {Galaxy}, the registry.redhat.io container image registry, and so on. -For internet-connected systems, this article also defines the outgoing traffic requirements for services that {PlatformNameShort} can be configured to use, such as Red Hat {HubName}, {InsightsShort}, {Galaxy}, the registry.redhat.io container image registry, and so on. +For internet-connected systems, the link:{URLPlanningGuide}/ref-network-ports-protocols_planning[Networks and Protocols] section of {TitlePlanningGuide} defines the outgoing traffic requirements for services that {PlatformNameShort} can be configured to use, such as Red Hat {HubName}, {InsightsShort}, {Galaxy}, the registry.redhat.io container image registry, and so on. Restrict access to the ports used by the {PlatformNameShort} components to protected networks and clients. The following restrictions are highly recommended: diff --git a/downstream/modules/aap-hardening/con-patch-automation-with-aap.adoc b/downstream/modules/aap-hardening/con-patch-automation-with-aap.adoc new file mode 100644 index 0000000000..8df0e5f714 --- /dev/null +++ b/downstream/modules/aap-hardening/con-patch-automation-with-aap.adoc @@ -0,0 +1,8 @@ +[id="con-patch-automation-with-aap"] + += Patch automation with {PlatformNameShort} + +Software patching is a fundamental activity of security and IT operations teams everywhere. +Keeping patches up to date is critical to remediating software vulnerabilities and meeting compliance requirements, but patching systems manually at scale can be time-consuming and error-prone. 
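+As a simple illustration of the manual work that can be automated, the following is a minimal playbook sketch that applies security errata with the `ansible.builtin.dnf` module; the `rhel_servers` group name is a placeholder for a group in your own inventory:
+
+----
+- name: Apply security updates on RHEL hosts
+  hosts: rhel_servers
+  become: true
+  tasks:
+    - name: Update all packages that have security errata
+      ansible.builtin.dnf:
+        name: "*"
+        security: true
+        state: latest
+----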
+Organizations should develop patch management strategies that meet their security, compliance, and business objectives, prioritizing the types of patches to apply (known exploits, critical or important vulnerabilities, optimizations, routine updates, new features, and so on) against the IT assets available across the enterprise. +Once policies and priorities have been defined and a patching plan is established, the manual tasks involved in patch management can be automated using {PlatformName} to improve patch deployment speed and accuracy, reduce human error, and limit downtime. diff --git a/downstream/modules/aap-hardening/con-patching-examples.adoc b/downstream/modules/aap-hardening/con-patching-examples.adoc new file mode 100644 index 0000000000..0c36ed064c --- /dev/null +++ b/downstream/modules/aap-hardening/con-patching-examples.adoc @@ -0,0 +1,7 @@ +[id="con-patching-examples"] + += Patching examples + +The following playbooks are provided as patching examples, and should be modified to fit the target environment and tested thoroughly before being used in production. +These examples use the `ansible.builtin.dnf` module for managing packages on RHEL and other operating systems that use the `dnf` package manager. +Modules for patching other Linux operating systems, Microsoft Windows, and many network devices are also available. diff --git a/downstream/modules/aap-hardening/con-planning-considerations.adoc b/downstream/modules/aap-hardening/con-planning-considerations.adoc index f6f8617464..eb116f47c4 100644 --- a/downstream/modules/aap-hardening/con-planning-considerations.adoc +++ b/downstream/modules/aap-hardening/con-planning-considerations.adoc @@ -7,16 +7,14 @@ [role="_abstract"] -When planning an {PlatformNameShort} installation, ensure that the following components are included: +{PlatformName} is composed of the following primary components: -* Installer-manged components -** {ControllerNameStart} -** {EDAcontroller} -** {PrivateHubNameStart} -* PostgreSQL database (if not external) -** External services -** {InsightsName} -** {HubNameStart} -** `registry.redhat.io` (default {ExecEnvShort} container registry) +* {ControllerNameStart} +* {AutomationMeshStart} +* {PrivateHubNameStart} +* {EDAcontroller} -See the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/platform-system-requirements[system requirements] section of the _{PlatformName} Planning Guide_ for additional information. +A PostgreSQL database is also provided, although a user-provided PostgreSQL database can be used as well. +Red Hat recommends that customers always deploy all components of {PlatformNameShort} so that all features and capabilities are available for use without the need to take further action. + +For further information, see link:{URLPlanningGuide}/aap_architecture[{PlatformName} Architecture]. diff --git a/downstream/modules/aap-hardening/con-platform-components.adoc b/downstream/modules/aap-hardening/con-platform-components.adoc index e3768d3ad3..de3d9c0b97 100644 --- a/downstream/modules/aap-hardening/con-platform-components.adoc +++ b/downstream/modules/aap-hardening/con-platform-components.adoc @@ -7,8 +7,8 @@ [role="_abstract"] -{PlatformNameShort} is a modular platform that includes {ControllerName}, {HubName}, {EDAcontroller}, and {InsightsShort}.
+{PlatformNameShort} is a modular platform composed of separate components that can be connected together, including {ControllerName}, {Gateway}, {HubName}, and {EDAcontroller}. [role="_additional-resources"] .Additional resources -For more information about the components provided within {PlatformNameShort}, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/ref-aap-components[Red Hat Ansible Automation Platform components] in the _Red Hat Ansible Automation Platform Planning Guide_. +For more information about the components provided within {PlatformNameShort}, see link:{URLPlanningGuide}/ref-aap-components[Red Hat Ansible Automation Platform components] in _Planning your installation_. diff --git a/downstream/modules/aap-hardening/con-product-overview.adoc b/downstream/modules/aap-hardening/con-product-overview.adoc index f3c42ba54d..daaf77320b 100644 --- a/downstream/modules/aap-hardening/con-product-overview.adoc +++ b/downstream/modules/aap-hardening/con-product-overview.adoc @@ -7,6 +7,11 @@ [role="_abstract"] -Ansible is an open source, command-line IT automation software application written in Python. You can use {PlatformNameShort} to configure systems, deploy software, and orchestrate advanced workflows to support application deployment, system updates, and more. Ansible’s main strengths are simplicity and ease of use. It also has a strong focus on security and reliability, featuring minimal moving parts. It uses secure, well-known communication protocols like SSH, HTTPS, and WinRM for transport and uses a human-readable language that is designed for getting started quickly without extensive training. +Ansible is an open source, command-line IT automation software application written in Python. +You can use {PlatformNameShort} to configure systems, deploy software, and orchestrate advanced workflows to support application deployment, system updates, and more. +Ansible's main strengths are simplicity and ease of use. It also has a strong focus on security and reliability, featuring minimal moving parts. It uses secure, well-known communication protocols like SSH, HTTPS, and WinRM for transport and uses a human-readable language that is designed for getting started quickly without extensive training. -{PlatformNameShort} enhances the Ansible language with enterprise-class features, such as Role-Based Access Controls (RBAC), centralized logging and auditing, credential management, job scheduling, and complex automation workflows. With {PlatformNameShort} you get certified content from our robust partner ecosystem; added security, reporting, and analytics; and life cycle technical support to scale automation across your organization. {PlatformNameShort} simplifies the development and operation of automation workloads for managing enterprise application infrastructure life cycles. It works across multiple IT domains including operations, networking, security, and development, as well as across diverse hybrid environments. \ No newline at end of file +{PlatformNameShort} enhances the Ansible language with enterprise-class features, such as _Role-Based Access Controls_ (RBAC), centralized logging and auditing, credential management, job scheduling, and complex automation workflows. +With {PlatformNameShort} you get certified content from our robust partner ecosystem; added security, reporting, and analytics; and life cycle technical support to scale automation across your organization.
+{PlatformNameShort} simplifies the development and operation of automation workloads for managing enterprise application infrastructure life cycles. +It works across multiple IT domains including operations, networking, security, and development, as well as across diverse hybrid environments. \ No newline at end of file diff --git a/downstream/modules/aap-hardening/con-protect-sensitive-data-no-log.adoc b/downstream/modules/aap-hardening/con-protect-sensitive-data-no-log.adoc new file mode 100644 index 0000000000..60924f07cd --- /dev/null +++ b/downstream/modules/aap-hardening/con-protect-sensitive-data-no-log.adoc @@ -0,0 +1,5 @@ +[id="con-protect-sensitive-data-no-log"] + += Protecting sensitive data with no_log +If you save Ansible output to a log, you expose any secret data in your Ansible output, such as passwords and usernames. +To keep sensitive values out of your logs, mark tasks that expose them with the `no_log: True` attribute. However, the `no_log` attribute does not affect debugging output, so be careful not to debug playbooks in a production environment. \ No newline at end of file diff --git a/downstream/modules/aap-hardening/con-rbac.adoc b/downstream/modules/aap-hardening/con-rbac.adoc index dee75d460a..95efc5f708 100644 --- a/downstream/modules/aap-hardening/con-rbac.adoc +++ b/downstream/modules/aap-hardening/con-rbac.adoc @@ -7,17 +7,21 @@ [role="_abstract"] -As an administrator, you can use the Role-Based Access Controls (RBAC) built into {ControllerName} to delegate access to server inventories, organizations, and more. Administrators can also centralize the management of various credentials, allowing end users to leverage a needed secret without ever exposing that secret to the end user. RBAC controls allow the controller to help you increase security and streamline management. +As an administrator, you can use the _Role-Based Access Controls_ (RBAC) built into the {Gateway} to delegate access to server inventories, organizations, and more. +Administrators can also centralize the management of various credentials, enabling end users to use a needed secret without ever exposing that secret to the end user. +RBAC controls allow {PlatformNameShort} to help you increase security and streamline management. -RBAC is the practice of granting roles to users or teams. RBACs are easiest to think of in terms of Roles which define precisely who or what can see, change, or delete an “object” for which a specific capability is being set. +RBAC is the practice of granting roles to users or teams. +RBAC is easiest to think of in terms of Roles, which define precisely who or what can see, change, or delete an “object” for which a specific capability is being set. -There are a few main concepts that you should become familiar with regarding {ControllerName}'s RBAC design–roles, resources, and users. Users can be members of a role, which gives them certain access to any resources associated with that role, or any resources associated with “descendant” roles. +There are a few main concepts that you should become familiar with regarding {PlatformNameShort}'s RBAC design: roles, resources, and users. +Users can be members of a role, which gives them certain access to any resources associated with that role, or any resources associated with “descendant” roles. A role is essentially a collection of capabilities. Users are granted access to these capabilities and the controller’s resources through the roles to which they are assigned or through roles inherited through the role hierarchy.
Roles associate a group of capabilities with a group of users. All capabilities are derived from membership within a role. Users receive capabilities only through the roles to which they are assigned or through roles they inherit through the role hierarchy. All members of a role have all capabilities granted to that role. Within an organization, roles are relatively stable, while users and capabilities are both numerous and may change rapidly. Users can have many roles. -For further detail on Role Hierarchy, access inheritance, Built in Roles, permissions, personas, Role Creation, and so on see link:https://docs.ansible.com/automation-controller/latest/html/userguide/security.html#role-based-access-controls[Role-Based Access Controls]. +For further detail on Role Hierarchy, access inheritance, Built-in Roles, permissions, personas, Role Creation, and so on, see link:https://docs.ansible.com/automation-controller/latest/html/userguide/security.html#role-based-access-controls[Managing access with Role-Based access controls]. The following is an example of an organization with roles and resource permissions: @@ -26,6 +30,12 @@ image::aap_ref_arch_2.4.1.png[Reference architecture for an example of an organi User access is based on managing permissions to system objects (users, groups, namespaces) rather than by assigning permissions individually to specific users. You can assign permissions to the groups you create. You can then assign users to these groups. This means that each user in a group has the permissions assigned to that group. -Groups created in Automation Hub can range from system administrators responsible for governing internal collections, configuring user access, and repository management to groups with access to organize and upload internally developed content to Automation Hub. For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_hub/assembly-user-access#ref-permissions[{HubNameStart} permissions] for consistency. +Teams created in {HubName} can range from system administrators responsible for governing internal collections, configuring user access, and repository management to groups with access to organize and upload internally developed content to {HubName}. -View-only access can be enabled for further lockdown of the {PrivateHubName}. By enabling view-only access, you can grant access for users to view collections or namespaces on your {PrivateHubName} without the need for them to log in. View-only access allows you to share content with unauthorized users while restricting their ability to only view or download source code, without permissions to edit anything on your {PrivateHubName}. Enable view-only access for your {PrivateHubName} by editing the inventory file found on your {PlatformName} installer. +//TBD link to getting started with hub +//For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_hub/assembly-user-access#ref-permissions[{HubNameStart} permissions] for consistency. + +View-only access can be enabled for further lockdown of the {PrivateHubName}. +By enabling view-only access, you can grant access for users to view collections or namespaces on your {PrivateHubName} without the need for them to log in. +View-only access allows you to share content with unauthorized users while restricting their ability to only view or download source code, without permissions to edit anything on your {PrivateHubName}.
+Enable view-only access for your {PrivateHubName} by editing the inventory file found on your {PlatformName} installer. diff --git a/downstream/modules/aap-hardening/con-rhel-host-planning.adoc b/downstream/modules/aap-hardening/con-rhel-host-planning.adoc index 52ba269f28..fa4ca7ce8f 100644 --- a/downstream/modules/aap-hardening/con-rhel-host-planning.adoc +++ b/downstream/modules/aap-hardening/con-rhel-host-planning.adoc @@ -7,6 +7,10 @@ [role="_abstract"] -The security of {PlatformNameShort} relies in part on the configuration of the underlying {RHEL} servers. For this reason, the underlying {RHEL} hosts for each {PlatformNameShort} component must be installed and configured in accordance with the link:{BaseURL}/red_hat_enterprise_linux/8/html-single/security_hardening/index[Security hardening for {RHEL} 8] or link:{BaseURL}/red_hat_enterprise_linux/9/html-single/security_hardening/index[Security hardening for {RHEL} 9] (depending on which operating system will be used), as well as any security profile requirements (CIS, STIG, HIPAA, and so on) used by your organization. +The security of {PlatformNameShort} relies in part on the configuration of the underlying {RHEL} servers. +For this reason, the underlying {RHEL} hosts for each {PlatformNameShort} component must be installed and configured in accordance with the link:{BaseURL}/red_hat_enterprise_linux/8/html-single/security_hardening/index[Security hardening for {RHEL} 8] or link:{BaseURL}/red_hat_enterprise_linux/9/html-single/security_hardening/index[Security hardening for {RHEL} 9] (depending on which operating system is used), as well as any security profile requirements (_Center for Internet Security_ (CIS), STIG, _Health Insurance Portability and Accountability Act_ (HIPAA), and so on) used by your organization. +This document recommends {RHEL} 9 for all new deployments. +When using the container-based installation method, {RHEL} 9 is required. -Note that applying certain security controls from the STIG or other security profiles may conflict with {PlatformNameShort} support requirements. Some examples are listed in the xref:con-controller-stig-considerations_{context}[{ControllerNameStart} STIG considerations] section, although this is not an exhaustive list. To maintain a supported configuration, be sure to discuss any such conflicts with your security auditors so the {PlatformNameShort} requirements are understood and approved. +//Note that applying certain security controls from the STIG or other security profiles may conflict with {PlatformNameShort} support requirements. +//Some examples are listed in the xref:con-controller-stig-considerations_{context}[{ControllerNameStart} STIG considerations] section, although this is not an exhaustive list. To maintain a supported configuration, be sure to discuss any such conflicts with your security auditors so the {PlatformNameShort} requirements are understood and approved. diff --git a/downstream/modules/aap-hardening/con-security-operations-center.adoc b/downstream/modules/aap-hardening/con-security-operations-center.adoc new file mode 100644 index 0000000000..3cd095b9e9 --- /dev/null +++ b/downstream/modules/aap-hardening/con-security-operations-center.adoc @@ -0,0 +1,14 @@ +[id="con-security-operations-center"] + += {PlatformName} as part of a Security Operations Center + +Protecting your organization is a critical task. 
+Automating functions of your _Security Operations Center_ (SOC) can help you streamline security operations, response, and remediation activities at scale to reduce the risk and cost of breaches. +{PlatformName} can connect your security teams, tools, and processes for more successful automation adoption and use. +Learn how automation can help you safeguard your business and respond to growing security threats faster. + +link:https://www.redhat.com/en/resources/security-automation-ebook[Simplify your security operations center] provides an overview of the benefits of automating SOC operations, including such use cases as: + +* Investigation enrichment +* Threat hunting +* Incident response diff --git a/downstream/modules/aap-hardening/con-user-authentication-planning.adoc b/downstream/modules/aap-hardening/con-user-authentication-planning.adoc index cfaff9004b..e0915d5213 100644 --- a/downstream/modules/aap-hardening/con-user-authentication-planning.adoc +++ b/downstream/modules/aap-hardening/con-user-authentication-planning.adoc @@ -7,15 +7,14 @@ [role="_abstract"] -When planning for access to the {PlatformNameShort} user interface or API, be aware that user accounts can either be local or mapped to an external authentication source such as LDAP. This guide recommends that where possible, all primary user accounts should be mapped to an external authentication source. Using external account sources eliminates a source of error when working with permissions in this context and minimizes the amount of time devoted to maintaining a full set of users exclusively within {PlatformNameShort}. This includes accounts assigned to individual persons as well as for non-person entities such as service accounts used for external application integration. Reserve any local administrator accounts such as the default "admin" account for emergency access or "break glass" scenarios where the external authentication mechanism is not available. - - -[NOTE] -==== -The {EDAcontroller} does not currently support external authentication, only local accounts. -==== - -For user accounts on the {RHEL} servers that run the {PlatformNameShort} services, follow your organizational policies to determine if individual user accounts should be local or from an external authentication source. Only users who have a valid need to perform maintenance tasks on the {PlatformNameShort} components themselves should be granted access to the underlying {RHEL} servers, as the servers will have configuration files that contain sensitive information such as encryption keys and service passwords. Because these individuals must have privileged access to maintain {PlatformNameShort} services, minimizing the access to the underlying {RHEL} servers is critical. Do not grant sudo access to the root account or local {PlatformNameShort} service accounts (awx, pulp, postgres) to untrusted users. +When planning for access to the {PlatformNameShort} user interface or API, be aware that user accounts can either be local or mapped to an external authentication source such as LDAP. +This guide recommends that where possible, all primary user accounts should be mapped to an external authentication source. +Using external account sources eliminates a source of error when working with permissions in this context and minimizes the amount of time devoted to maintaining a full set of users exclusively within {PlatformNameShort}.
This includes accounts assigned to individual persons as well as to non-person entities such as service accounts used for external application integration. +Reserve any local administrator accounts such as the default "admin" account for emergency access or "break glass" scenarios where the external authentication mechanism is not available. + +For user accounts on the {RHEL} servers that run the {PlatformNameShort} services, follow your organizational policies to determine if individual user accounts should be local or from an external authentication source. +Only users who have a valid need to perform maintenance tasks on the {PlatformNameShort} components themselves should be granted access to the underlying {RHEL} servers, as the servers will have configuration files that contain sensitive information such as encryption keys and service passwords. +Because these individuals must have privileged access to maintain {PlatformNameShort} services, minimizing the access to the underlying {RHEL} servers is critical. Do not grant sudo access to the root account or local {PlatformNameShort} service accounts (awx, pulp, postgres) to untrusted users. [NOTE] ==== diff --git a/downstream/modules/aap-hardening/platform b/downstream/modules/aap-hardening/platform new file mode 120000 index 0000000000..01b1259b79 --- /dev/null +++ b/downstream/modules/aap-hardening/platform @@ -0,0 +1 @@ +../platform/ \ No newline at end of file diff --git a/downstream/modules/aap-hardening/proc-configure-centralized-logging.adoc b/downstream/modules/aap-hardening/proc-configure-centralized-logging.adoc index 577f2060e1..4b223f7bb9 100644 --- a/downstream/modules/aap-hardening/proc-configure-centralized-logging.adoc +++ b/downstream/modules/aap-hardening/proc-configure-centralized-logging.adoc @@ -5,89 +5,155 @@ = Configure centralized logging -A critical capability of logging is the ability for the {ControllerName} to detect and take action to mitigate a failure, such as reaching storage capacity, which by default shuts down the controller. This guide recommends that the application server be part of a high availability system. When this is the case, {ControllerName} will take the following steps to mitigate failure: +Centralized logging is essential to assist in monitoring system security and performing large-scale log analysis. +The _Confidentiality, Integrity, and Availability_ (CIA) triad, which originated from a combination of ideas from the US military and government, is the foundational model for proper security system development and best practices. Centralized logging falls under the Integrity aspect to assist in identifying if data or systems have been tampered with. +Logging to a centralized system enables troubleshooting automation across multiple systems by collecting all logs in a single location, making it easier to identify issues, analyze trends, and correlate events from different servers, especially in a complex {PlatformNameShort} deployment. +Otherwise, manually checking individual machines would be time-consuming, so this capability is valuable for debugging in addition to meeting security best practices. +This supports overall system health and stability, and assists in identifying potential security threats. +In addition to the logging configuration, take into consideration the possibility of failure to log due to storage capacity limits or hardware failure, as well as the high availability architecture.
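+For the underlying {RHEL} hosts, one common approach is an rsyslog forwarding rule; the following minimal sketch (collector hostname and port are placeholders) sends all local logs to a central collector, where `@@` selects TCP and a single `@` would select UDP:
+
+----
+# /etc/rsyslog.d/forward.conf
+*.* @@loghost.example.com:514
+----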
-* If the failure was caused by the lack of log record storage capacity, the application must continue generating log records if possible (automatically restarting the log service if necessary), overwriting the oldest log records in a first-in-first-out manner. -* If log records are sent to a centralized collection server and communication with this server is lost or the server fails, the application must queue log records locally until communication is restored or until the log records are retrieved manually. Upon restoration of the connection to the centralized collection server, action must be taken to synchronize the local log data with the collection server. +There are several additional benefits, including: -To verify the rsyslog configuration for each {ControllerName} host, complete the following steps for each {ControllerName}: +* The data is sent in JSON format over an HTTP connection using minimal service-specific tweaks engineered in a custom handler or through an imported library. +The types of data that are most useful to the controller are job fact data, job events/job runs, activity stream data, and log messages. +* Deeper insights into the automation process by analyzing logs from different parts of the infrastructure, including playbook execution details, task outcomes, and system events. +* Identifying performance bottlenecks and optimizing the Ansible playbooks by analyzing execution times and resource usage from the logs. +* Centralized logging helps meet compliance mandates by providing a single source of truth for auditing purposes. +* Third-party integration with a centralized log management platform like Splunk, Logstash, ElasticSearch, or Loggly to collect and analyze logs. -The administrator must check the rsyslog configuration for each {ControllerName} host to verify the log rollover against a organizationally defined log capture size. To do this, use the following steps, and correct using the configuration steps as required: +The logging aggregator service works with the following monitoring and data analysis systems: -. Check the `LOG_AGGREGATOR_MAX_DISK_USAGE_GB` field in the {ControllerName} configuration. On the host, execute: +* Splunk +* Loggly +* Sumologic +* Elastic stack (formerly ELK stack) + +include::platform/proc-controller-set-up-logging.adoc[leveloffset=+3] + +To enable LDAP logging, use the following procedure. + +.Procedure + +. Edit the gateway settings file: +.. On {PlatformNameShort} {PlatformVers} Containerized, the file is `~/aap/gateway/etc/settings.py` (as the user running the {Gateway} container). +.. On {PlatformNameShort} {PlatformVers} RPM-based, the file is `/etc/ansible-automation-platform/gateway/settings.py`. + ---- -awx-manage print_settings LOG_AGGREGATOR_MAX_DISK_USAGE_GB + (...) + CACHES['fallback']['LOCATION'] = '/var/cache/ansible-automation-platform/gateway' + + LOGGING['loggers']['aap']['level'] = 'INFO' + LOGGING['loggers']['ansible_base']['level'] = 'INFO' + LOGGING['loggers']['django_auth_ldap']['level'] = 'DEBUG' ###### add this line + + (...) ---- -+ -If this field is not set to the organizationally defined log capture size, then follow the configuration steps. -. Check `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH` field in the {ControllerName} configuration for the log file location to `/var/lib/awx`. On the host, execute: +. Restart the {Gateway} service or container: + +..
On {PlatformNameShort} {PlatformVers} Containerized, restart the {Gateway} service so that it restarts the {Gateway} container: + ----- -awx-manage print_settings LOG_AGGREGATOR_MAX_DISK_USAGE_PATH ----- +[NOTE] +==== +Ensure that you run `systemctl` with the `--user` flag as follows: ++ +`$ systemctl --user restart automation-gateway` +==== + +.. On {PlatformNameShort} {PlatformVers} RPM-based, use the `automation-gateway-service` command: + -If this field is not set to `/var/lib/awx`, then follow these configuration steps: +`# automation-gateway-service restart` + +Some of the following examples of meeting compliance requirements come from the US DoD _Security Technical Implementation Guide_, but they trace back to integrity and security best practices. + +{ControllerNameStart} must use external log providers that can collect user activity logs in independent, protected repositories to prevent modification or repudiation. +{ControllerNameStart} must be configured to use external logging to compile log records from multiple components within the server. +The events occurring must be time-correlated in order to conduct accurate forensic analysis. +In addition, the correlation must meet certain tolerance criteria. + +The following steps implement the security control: + +.Procedure +. Log in to {ControllerName} as an administrator. +. From the navigation panel, select {MenuSetLogging}. +. On the *Logging settings* page, click btn:[Edit]. +. Set the following fields: + +* Set *Logging Aggregator* to `Not configured`. This is the default. +* Set *Enable External Logging* to `On`. +* Set *Logging Aggregator Level Threshold* to DEBUG. +* Set *TCP Connection Timeout* to 5 (the default) or to the organizational timeout. +* Set *Enable/disable HTTPS certificate verification* to `On`. +. Click btn:[Save]. + +{ControllerNameStart} must allocate log record storage capacity and shut down by default upon log failure (unless availability is an overriding concern). +It is critical that when a system is at risk of failing to process logs, it detects and takes action to mitigate the failure. +Log processing failures include software/hardware errors, failures in the log capturing mechanisms, and log storage capacity being reached or exceeded. +During a failure, the application server must be configured to shut down unless the application server is part of a high availability system. +When availability is an overriding concern, other approved actions in response to a log failure are as follows: + +. If the failure was caused by the lack of log record storage capacity, the application must continue generating log records if possible (automatically restarting the log service if necessary), overwriting the oldest log records in a first-in-first-out manner. +. If log records are sent to a centralized collection server and communication with this server is lost or the server fails, the application must queue log records locally until communication is restored or until the log records are retrieved manually. +Upon restoration of the connection to the centralized collection server, action must be taken to synchronize the local log data with the collection server. + --- -.. Open a web browser and navigate to \https:///api/v2/settings/logging/, where is the fully-qualified hostname of your {ControllerName}. If the btn:[Log In] option is displayed, click it, log in as an {ControllerName} adminstrator account, and continue. +The following steps implement the security control: -..
In the *Content* section, modify the following values, then click btn:[PUT]: +.. Open a web browser and navigate to the logging API, `/api/v2/settings/logging/` + -* LOG_AGGREGATOR_MAX_DISK_USAGE_GB = -* LOG_AGGREGATOR_MAX_DISK_USAGE_PATH = `/var/lib/awx` --- + +Note that this change will need to be made on each {ControllerName} in a load-balanced scenario. +Ensure that you are authenticated as an {ControllerName} administrator. .. In the *Content* section, modify the following values: -All user session data must be logged to support troubleshooting, debugging and forensic analysis for visibility and analytics. Without this data from the controller’s web server, important auditing and analysis for event investigations will be lost. To verify that the system is configured to ensure that user session data is logged, use the following steps: +** `LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB` = organization-defined requirement for log buffering. +** `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH` = `/var/lib/awx` +.. Click btn:[PUT]. -For each {ControllerName} host, navigate to console Settings >> System >> Miscellaneous System. +{ControllerNameStart} must generate the appropriate log records. +{ControllerNameStart}'s web server must log all details related to user sessions in support of troubleshooting, debugging, and forensic analysis. +Without a data logging feature, the organization loses an important auditing and analysis tool for event investigations. +To implement the security control as a system administrator, for each {ControllerName} host: + +.Procedure +. From the navigation panel, select {MenuSetSystem}. The System Settings page is displayed. . Click btn:[Edit]. . Set the following: -* Enable Activity Stream = On -* Enable Activity Stream for Inventory Sync = On -* Organization Admins Can Manage Users and Teams = Off -* All Users Visible to Organization Admins = On -. Click btn:[Save] -To set up logging to any of the aggregator types, read the documentation on link:https://docs.ansible.com/automation-controller/latest/html/administration/logging.html#logging-aggregator-services[supported log aggregators] and configure your log aggregator using the following steps: -. Navigate to {PlatformNameShort}. -. Click btn:[Settings]. -. Under the list of System options, select Logging settings. -. At the bottom of the Logging settings screen, click btn:[Edit]. -. Set the configurable options from the fields provided: -* Enable External Logging: Click the toggle button to btn:[ON] if you want to send logs to an external log aggregator. The UI requires the Logging Aggregator and Logging Aggregator Port fields to be filled in before this can be done. -* Logging Aggregator: Enter the hostname or IP address you want to send logs. -* Logging Aggregator Port: Specify the port for the aggregator if it requires one. -* Logging Aggregator Type: Select the aggregator service from the drop-down menu: -** Splunk -** Loggly -** Sumologic -** Elastic stack (formerly ELK stack) -* Logging Aggregator Username: Enter the username of the logging aggregator if required. -* Logging Aggregator Password/Token: Enter the password of the logging aggregator if required. -* Log System Tracking Facts Individually: Click the tooltip icon for additional information, whether or not you want to turn it on, or leave it off by default. -* Logging Aggregator Protocol: Select a connection type (protocol) to communicate with the log aggregator. Subsequent options vary depending on the selected protocol.
-* Logging Aggregator Level Threshold: Select the level of severity you want the log handler to report. -* TCP Connection Timeout: Specify the connection timeout in seconds. This option is only applicable to HTTPS and TCP log aggregator protocols. -* Enable/disable HTTPS certificate verification: Certificate verification is enabled by default for HTTPS log protocol. Click the toggle button to btn:[OFF] if you do not want the log handler to verify the HTTPS certificate sent by the external log aggregator before establishing a connection. -* Loggers to Send Data to the Log Aggregator Form: All four types of data are pre-populated by default. Click the tooltip icon next to the field for additional information on each data type. Delete the data types you do not want. -* Log Format For API 4XX Errors: Configure a specific error message. -. Click btn:[Save] to apply the settings or btn:[Cancel] to abandon the changes. -. To verify if your configuration is set up correctly, btn:[Save] first then click btn:[Test]. This sends a test log message to the log aggregator using the current logging configuration in the {ControllerName}. You should check to make sure this test message was received by your external log aggregator. -A {ControllerName} account is automatically created for any user who logs in with an LDAP username and password. These users can automatically be placed into organizations as regular users or organization administrators. This means that logging should be turned on when LDAP integration is in use. You can enable logging messages for the SAML adapter the same way you can enable logging for LDAP. -The following steps enable the LDAP logging: +* *Enable Activity Stream* = On +* *Enable Activity Stream for Inventory Sync* = On +* *Organization Admins Can Manage Users and Teams* = On +* *All Users Visible to Organization Admins* = On +. Click btn:[Save]. + +{ControllerNameStart}'s log files must be accessible by explicitly defined privilege. +A failure of the confidentiality of {ControllerName} log files would enable an attacker to identify key information about the system that they might not otherwise be able to obtain, and then use that information to enumerate further details that enable escalation or lateral movement. + +To implement the security control: + +.Procedure +. As a system administrator for each {ControllerName} host, set the permissions and owner of the {ControllerName} NGINX log directory: + +* `chmod 770 /var/log/nginx` +* `chown nginx:root /var/log/nginx` + +. Set the permissions and owner of the {ControllerName} log directory: + +* `chmod 770 /var/log/tower` +* `chown awx:awx /var/log/tower` + +. Set the permissions and owner of the {ControllerName} supervisor log directory: + +* `chmod 770 /var/log/supervisor/` +* `chown root:root /var/log/supervisor/` + +{ControllerNameStart} must be configured to fail over to another system in the event of log subsystem failure. +{ControllerNameStart} hosts must be capable of failing over to another {ControllerName} host which can handle application and logging functions upon detection of an application log processing failure. +This enables continual operation of the application and logging functions while minimizing the loss of operation for the users and loss of log data. + +To implement the security control: + +* If {ControllerName} is not in an HA configuration, the administrator must reinstall {ControllerName}. -To enable logging for LDAP, you must set the level to DEBUG in the Settings configuration window. -.
Click btn:[Settings] from the left navigation pane and select Logging settings from the System list of options. -. Click btn:[Edit]. -. Set the Logging Aggregator Level Threshold field to Debug. -. Click btn:[Save] to save your changes. diff --git a/downstream/modules/aap-hardening/proc-configure-external-authentication.adoc b/downstream/modules/aap-hardening/proc-configure-external-authentication.adoc index 2ddb9a1f14..2211741afc 100644 --- a/downstream/modules/aap-hardening/proc-configure-external-authentication.adoc +++ b/downstream/modules/aap-hardening/proc-configure-external-authentication.adoc @@ -7,7 +7,14 @@ [role="_abstract"] -As noted in the xref:con-user-authentication-planning_{context}[User authentication planning] section, external authentication is recommended for user access to the {ControllerName}. After you choose the authentication type that best suits your needs, navigate to {MenuAEAdminSettings} and select *Authentication* in the {ControllerName} UI, click on the relevant link for your authentication back-end, and follow the relevant instructions for link:https://docs.ansible.com/automation-controller/latest/html/administration/configure_tower_in_tower.html#authentication[configuring the authentication] connection. +As noted in the xref:con-user-authentication-planning_{context}[User authentication planning] section, external authentication is recommended for user access to the {ControllerName}. +After you choose the authentication type that best suits your needs: + +. Navigate to {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select the *Authentication type* you require from the menu. +. Click btn:[Next]. +. On the *Authentication details* page in the {ControllerName} UI, click the relevant link for your authentication back-end, and follow the relevant instructions for link:https://docs.ansible.com/automation-controller/latest/html/administration/configure_tower_in_tower.html#authentication[configuring the authentication] connection. // [ddacosta] The following will need to be rewritten for the way this is configured in 2.5 When using LDAP for external authentication with the {ControllerName}, navigate to {MenuAEAdminSettings} and select *Authentication* and then select *LDAP settings* on the {ControllerName} and ensure that one of the following is configured: diff --git a/downstream/modules/aap-hardening/proc-disaster-recovery-operations.adoc b/downstream/modules/aap-hardening/proc-disaster-recovery-operations.adoc index b7c99acf39..ddd830c746 100644 --- a/downstream/modules/aap-hardening/proc-disaster-recovery-operations.adoc +++ b/downstream/modules/aap-hardening/proc-disaster-recovery-operations.adoc @@ -25,4 +25,4 @@ To generate a new secret key, perform the following steps: . Backup your {PlatformNameShort} database before you do anything else! Follow the procedure described in the link:https://docs.ansible.com/automation-controller/latest/html/administration/backup_restore.html[Backing Up and Restoring Controller] section. . Using the inventory from your install (same inventory with which you run backups/restores), run `setup.sh -k`. -A backup copy of the prior key is saved in `/etc/tower/`. +A backup copy of the previous key is saved in `/etc/tower/`.
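+For example, a minimal command sequence (run from the installation program directory, using the same inventory file as the original installation) might look like the following; the backup destination path is illustrative:
+
+----
+# Back up the database and configuration first.
+$ ./setup.sh -e 'backup_dest=/var/backups/aap' -b
+
+# Then regenerate the secret key; the previous key is saved in /etc/tower/.
+$ ./setup.sh -k
+----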
diff --git a/downstream/modules/aap-hardening/proc-fapolicyd.adoc b/downstream/modules/aap-hardening/proc-fapolicyd.adoc index 3e84fbc19a..74fd4879e9 100644 --- a/downstream/modules/aap-hardening/proc-fapolicyd.adoc +++ b/downstream/modules/aap-hardening/proc-fapolicyd.adoc @@ -7,14 +7,18 @@ [role="_abstract"] -The {RHEL} 8 STIG requires the fapolicyd daemon to be running. However, {PlatformNameShort} is not currently supported when fapolicyd enforcing policy, as this causes failures during the installation and operation of {PlatformNameShort}. Because of this, the installer runs a pre-flight check that will halt installation if it discovers that fapolicyd is enforcing policy. This guide recommends setting fapolicyd to permissive mode on the {ControllerName} using the following steps: +A compliance policy might require the `fapolicyd` daemon to be running. +However, {PlatformNameShort} is not currently supported when `fapolicyd` is enforcing policy, as this causes failures during both installation and operation of {PlatformNameShort}. +Because of this, the installation program runs a pre-flight check that halts installation if it discovers that `fapolicyd` is enforcing policy. +This guide recommends setting `fapolicyd` to permissive mode on {PlatformNameShort} using the following steps: . Edit the file `/etc/fapolicyd/fapolicyd.conf` and set "permissive = 1". -. Restart the service with the command `sudo systemctl restart fapolicyd.service`. +. Restart the service with the command ++ +`sudo systemctl restart fapolicyd.service` -In environments where STIG controls are routinely audited, discuss waiving the fapolicy-related STIG controls with your security auditor. [NOTE] ==== -If the {RHEL} 8 STIG is also applied to the installation host, the default fapolicyd configuration causes the {PlatformNameShort} installer to fail. In this case, the recommendation is to set fapolicyd to permissive mode on the installation host. +If this security control is also applied to the installation host, the default `fapolicyd` configuration causes the {PlatformNameShort} installation program to fail. In this case, the recommendation is to set `fapolicyd` to permissive mode on the installation host. ==== diff --git a/downstream/modules/aap-hardening/proc-file-systems-mounted-noexec.adoc b/downstream/modules/aap-hardening/proc-file-systems-mounted-noexec.adoc index bfd7caf517..9e38bf1f7a 100644 --- a/downstream/modules/aap-hardening/proc-file-systems-mounted-noexec.adoc +++ b/downstream/modules/aap-hardening/proc-file-systems-mounted-noexec.adoc @@ -7,13 +7,14 @@ [role="_abstract"] -The {RHEL} 8 STIG requires that a number of file systems are mounted with the `noexec` option to prevent execution of binaries located in these file systems. The {PlatformNameShort} installer runs a preflight check that will fail if any of the following file systems are mounted with the `noexec` option: +A compliance profile might require that certain file systems are mounted with the `noexec` option to prevent execution of binaries located in these file systems. The {PlatformNameShort} RPM-based installer runs a preflight check that fails if any of the following file systems are mounted with the `noexec` option: * `/tmp` * `/var` * `/var/tmp` -To install {PlatformNameShort}, you must re-mount these file systems with the `noexec` option removed. Once installation is complete, proceed with the following steps: +To install {PlatformNameShort}, you must re-mount these file systems with the `noexec` option removed. 
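+For example, one way to temporarily re-mount `/tmp` with execution permitted for the duration of the installation is:
+
+----
+$ sudo mount -o remount,exec /tmp
+----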
+When installation is complete, proceed with the following steps: . Reapply the `noexec` option to the `/tmp` and `/var/tmp` file systems. . Change the {ControllerName} job execution path from `/tmp` to an alternate directory that does not have the `noexec` option enabled. diff --git a/downstream/modules/aap-hardening/proc-install-user-pki.adoc b/downstream/modules/aap-hardening/proc-install-user-pki.adoc index 5e472e662d..20e84fa6c7 100644 --- a/downstream/modules/aap-hardening/proc-install-user-pki.adoc +++ b/downstream/modules/aap-hardening/proc-install-user-pki.adoc @@ -7,42 +7,55 @@ [role="_abstract"] -By default, {PlatformNameShort} creates self-signed PKI certificates for the infrastructure components of the platform. Where an existing PKI infrastructure is available, certificates must be generated for the {ControllerName}, {PrivateHubName}, {EDAcontroller}, and the postgres database server. Copy the certificate files and their relevant key files to the installer directory, along with the CA certificate used to verify the certificates. +By default, {PlatformNameShort} creates self-signed _Public Key Infrastructure_ (PKI) certificates for the infrastructure components of the platform. +Where an existing PKI infrastructure is available, certificates must be generated for the {ControllerName}, {PrivateHubName}, {EDAcontroller}, and the postgres database server. +Copy the certificate files and their relevant key files to the installation program directory, along with the CA certificate used to verify the certificates. Use the following inventory variables to configure the infrastructure components with the new certificates. .PKI certificate inventory variables -|=== -| *Variable* | *Details* -| `custom_ca_cert` | The file name of the CA certificate located in the installer directory. +|==== +| *RPM Variable* | *Containerized Variable* | *Details* +| `custom_ca_cert` | `custom_ca_cert` | The path to the custom CA certificate file. -| `web_server_ssl_cert` | The file name of the {ControllerName} PKI certificate located in the installer directory. +If set, this will install a custom CA certificate to the system truststore. -| `web_server_ssl_key` | The file name of the {ControllerName} PKI key located in the installer directory. +| `web_server_ssl_cert` | `controller_tls_cert` | The file name of the {ControllerName} PKI certificate located in the installer directory. -| `automationhub_ssl_cert` | The file name of the {PrivateHubName} PKI certificate located in the installer directory. +| `web_server_ssl_key` | `controller_tls_key` | The file name of the {ControllerName} PKI key located in the installation program directory. -| `automationhub_ssl_key` | The file name of the {PrivateHubName} PKI key located in the installer directory. +| `automationhub_ssl_cert` | `hub_tls_cert` | The file name of the {PrivateHubName} PKI certificate located in the installation program directory. -| `postgres_ssl_cert` | The file name of the database server PKI certificate located in the installer directory. This variable is only needed for the installer-managed database server, not if a third-party database is used. +| `automationhub_ssl_key` | `hub_tls_key` | The file name of the {PrivateHubName} PKI key located in the installation program directory. -| `postgres_ssl_key` | The file name of the database server PKI certificate located in the installer directory. This variable is only needed for the installer-managed database server, not if a third-party database is used. 
+| `postgres_ssl_cert` | `postgresql_tls_cert` | The file name of the database server PKI certificate located in the installation program directory. This variable is only needed for the installer-managed database server, not if a third-party database is used. -| `automationedacontroller_ssl_cert` | The file name of the {EDAcontroller} PKI certificate located in the installer directory. +| `postgres_ssl_key` | `postgresql_tls_key` | The file name of the database server PKI key located in the installation program directory. This variable is only needed for the installer-managed database server, not if a third-party database is used. -| `automationedacontroller_ssl_key` | The file name of the {EDAcontroller} PKI key located in the installer directory. -|=== +| `automationedacontroller_ssl_cert` | `eda_tls_cert` | The file name of the {EDAcontroller} PKI certificate located in the installation program directory. -When multiple {ControllerName} are deployed with a load balancer, the `web_server_ssl_cert` and `web_server_ssl_key` are shared by each controller. To prevent hostname mismatches, the certificate's Common Name (CN) must match the DNS FQDN used by the load balancer. This also applies when deploying multiple {PrivateHubName} and the `automationhub_ssl_cert` and `automationhub_ssl_key` variables. If your organizational policies require unique certificates for each service, each certificate requires a Subject Alt Name (SAN) that matches the DNS FQDN used for the load-balanced service. To install unique certificates and keys on each {ControllerName}, the certificate and key variables in the installation inventory file must be defined as per-host variables instead of in the `[all:vars]` section. For example: +| `automationedacontroller_ssl_key` | `eda_tls_key` | The file name of the {EDAcontroller} PKI key located in the installation program directory. +| - | `gateway_tls_cert` | The filename of the {Gateway} PKI certificate located in the installation program directory. +| - | `gateway_tls_key` | The file name of the {Gateway} PKI key located in the installation program directory. +|==== + +When multiple {Gateway}s are deployed with a load balancer, `gateway_tls_cert` and `gateway_tls_key` are shared by each {Gateway}. +To prevent hostname mismatches, the certificate's _Common Name_ (CN) must match the DNS FQDN used by the load balancer. +//This also applies when deploying multiple {PrivateHubName} and the `automationhub_ssl_cert` and `automationhub_ssl_key` variables. +If your organizational policies require unique certificates for each service, each certificate requires a _Subject Alt Name_ (SAN) that matches the DNS FQDN used for the load-balanced service. +To install unique certificates and keys on each {Gateway}, the certificate and key variables in the installation inventory file must be defined as per-host variables instead of in the `[all:vars]` section. 
+For example: ---- +[automationgateway] +gateway0.example.com gateway_tls_cert=/path/to/cert0 gateway_tls_key=/path/to/key0 +gateway1.example.com gateway_tls_cert=/path/to/cert1 gateway_tls_key=/path/to/key1 + [automationcontroller] controller0.example.com web_server_ssl_cert=/path/to/cert0 web_server_ssl_key=/path/to/key0 controller1.example.com web_server_ssl_cert=/path/to/cert1 web_server_ssl_key=/path/to/key1 controller2.example.com web_server_ssl_cert=/path/to/cert2 web_server_ssl_key=/path/to/key2 ----- ----- [automationhub] hub0.example.com automationhub_ssl_cert=/path/to/cert0 automationhub_ssl_key=/path/to/key0 hub1.example.com automationhub_ssl_cert=/path/to/cert1 automationhub_ssl_key=/path/to/key1 diff --git a/downstream/modules/aap-hardening/proc-namespaces.adoc b/downstream/modules/aap-hardening/proc-namespaces.adoc index 581b5e94c6..e306d1d11c 100644 --- a/downstream/modules/aap-hardening/proc-namespaces.adoc +++ b/downstream/modules/aap-hardening/proc-namespaces.adoc @@ -7,9 +7,10 @@ [role="_abstract"] -The {RHEL} 8 STIG requires that the kernel setting `user.max_user_namespaces` is set to "0", but only if Linux containers are not in use. Because {PlatformNameShort} uses containers as part of its {ExecEnvShort} capability, this STIG control does not apply to the {ControllerName}. +A compliance profile might require that the kernel setting `user.max_user_namespaces` is set to `0` to prevent the launch of Linux containers. +The DISA STIG, for example, specifically requires this control, but only if Linux containers are not required. Because {PlatformNameShort} can be installed and operated in containers and also uses containers as part of its {ExecEnvShort} capability, Linux containers are required, and this control must be disabled. -To check the `user.max_user_namespaces` kernel setting, complete the following steps: +To check the `user.max_user_namespaces` kernel setting, complete the following steps on each {PlatformNameShort} component in the installation inventory: . Log in to your {ControllerName} at the command line. . Run the command `sudo sysctl user.max_user_namespaces`. diff --git a/downstream/modules/aap-hardening/ref-aap-authentication.adoc b/downstream/modules/aap-hardening/ref-aap-authentication.adoc new file mode 100644 index 0000000000..5fade77568 --- /dev/null +++ b/downstream/modules/aap-hardening/ref-aap-authentication.adoc @@ -0,0 +1,38 @@ +// Module included in the following assemblies: +// downstream/assemblies/assembly-hardening-aap.adoc + +[id="ref-aap-authentication_{context}"] + += {PlatformNameShort} authentication + +[role="_abstract"] + +{PlatformNameShort} currently supports the following external authentication mechanisms through the {Gateway} UI: + +* Local +* LDAP +* SAML +* TACACS+ +* RADIUS +* {MSEntraID}, formerly known as {Azure} Active Directory +* Google OAuth +* Generic OIDC +* Keycloak +* GitHub +* GitHub organization +* GitHub team +* GitHub enterprise +* GitHub enterprise organization +* GitHub enterprise team + +Choose an authentication mechanism that adheres to your organization's authentication policies. +The authentication mechanism used must ensure that the authentication-related traffic between {PlatformNameShort} and the authentication back-end is encrypted when the traffic occurs on a public or non-secure network (for example, LDAPS or LDAP over TLS, HTTPS for OAuth2 and SAML providers, etc.).
+ +For more information on authentication methods, see link:{URLCentralAuth}/gw-configure-authentication#gw-config-authentication-type[Configuring an authentication type]. + +In the {Gateway}, any “system administrator” account can edit, change, and update any inventory or automation definition. +Restrict these account privileges to the minimum set of users possible for low-level {PlatformNameShort} configuration and disaster recovery. + + + diff --git a/downstream/modules/aap-hardening/ref-aap-operational-secrets.adoc b/downstream/modules/aap-hardening/ref-aap-operational-secrets.adoc new file mode 100644 index 0000000000..ba22bfbbd2 --- /dev/null +++ b/downstream/modules/aap-hardening/ref-aap-operational-secrets.adoc @@ -0,0 +1,151 @@ +// Module included in the following assemblies: +// downstream/assemblies/assembly-hardening-aap.adoc + +[id="ref-aap-operational-secrets_{context}"] + += {PlatformNameShort} operational secrets + +{PlatformNameShort} uses several secrets (passwords, keys, and so on) operationally. +These secrets are stored unencrypted on the various {PlatformNameShort} servers, as each component service must read them at startup. +All files are protected by Unix permissions, and restricted to the root user or the appropriate service account user. +These files should be routinely monitored to ensure there has been no unauthorized access or modification. + +== RPM-based installation secrets + +The following table provides the location of these secrets for RPM-based installations of {PlatformNameShort}. + +.{PlatformNameShort} operational secrets + +|=== +2+| *{ControllerNameStart} secrets* +| *File path* | *Description* +| `/etc/tower/SECRET_KEY` | A secret key used for encrypting automation secrets in the database. If the `SECRET_KEY` changes or is unknown, no encrypted fields in the database will be accessible. + +| `/etc/tower/tower.cert` + +`/etc/tower/tower.key` | SSL certificate and key for the {ControllerName} web service. + +A self-signed `cert/key` is installed by default; you can provide a locally appropriate certificate and key. + +For more information, see xref:proc-install-user-pki_{context}[Installing with user-provided PKI certificates]. + +| `/etc/tower/conf.d/postgres.py` | Contains the password used by {ControllerName} to connect to the database, unless TLS authentication is used for the database connection. + +| `/etc/tower/conf.d/channels.py` | Contains the secret used by {ControllerName} for websocket broadcasts. + +| `/etc/tower/conf.d/gateway.py` | Contains the key used by {ControllerName} to sync state with the {Gateway}. + +2+| *{GatewayStart} secrets* +| *File path* | *Description* + +| `/etc/ansible-automation-platform/gateway/SECRET_KEY` | A secret key used for encrypting automation secrets in the database. +If the `SECRET_KEY` changes or is unknown, the {Gateway} cannot access the encrypted secrets in the database. + +| `/etc/ansible-automation-platform/gateway/gateway.cert` | SSL certificate for the {Gateway} web service. + +A self-signed certificate is installed by default, although a user-provided certificate and key pair can be used. + +For more information, see xref:proc-install-user-pki_{context}[Installing with user-provided PKI certificates]. + +| `/etc/ansible-automation-platform/gateway/gateway.key` | SSL key for the {Gateway} web service. + +A self-signed certificate is installed by default, although a user-provided certificate and key pair can be used.
+ +For more information, see xref:proc-install-user-pki_{context}[Installing with user-provided PKI certificates]. + +| `/etc/ansible-automation-platform/gateway/cache.cert` | SSL certificate used for mutual TLS (mTLS) authentication with the Redis cache used by the {Gateway}. + +| `/etc/ansible-automation-platform/gateway/cache.key` | SSL key used for mutual TLS (mTLS) authentication with the Redis cache used by the {Gateway}. + +| `/etc/ansible-automation-platform/gateway/settings.py` | Contains the password used by the {Gateway} to connect to the database, unless TLS authentication is used for the database connection. +Also contains the password used to connect to the Redis cache used by the {Gateway}. + +2+| *{HubNameStart} secrets* +| *File path* | *Description* +| `/etc/pulp/settings.py` | Contains the password used by {HubName} to connect to the database, unless TLS authentication is used for the database connection. Contains the Django secret key used by the {HubName} web service. + +//| `/etc/pulp/certs/token_public_key.pem` `/etc/pulp/certs/token_private_key.pem` | TBD + +| `/etc/pulp/certs/pulp_webserver.crt` | SSL certificate for the {HubName} web service. + +A self-signed certificate is installed by default, although a user-provided certificate and key pair can be used. + +For more information, see xref:proc-install-user-pki_{context}[Installing with user-provided PKI certificates]. + +| `/etc/pulp/certs/pulp_webserver.key` | SSL key for the {HubName} web service. + +A self-signed certificate is installed by default, although a user-provided certificate and key pair can be used. + +For more information, see xref:proc-install-user-pki_{context}[Installing with user-provided PKI certificates]. + +| `/etc/pulp/certs/database_fields.symmetric.key` | A key used for encrypting sensitive fields in the {HubName} database table. + +If the key changes or is unknown, {HubName} cannot access the encrypted fields in the database. + +2+| *{EDAName} secrets* +| *File path* | *Description* +| `/etc/ansible-automation-platform/eda/SECRET_KEY` | A secret key used for encrypting fields in the {EDAName} controller database table. + +If the `SECRET_KEY` changes or is unknown, the {EDAName} controller cannot access the encrypted fields in the database. + +| `/etc/ansible-automation-platform/eda/settings.yaml` | Contains the password used by the {EDAName} controller to connect to the database, unless TLS authentication is used for the database connection. + +Contains the password used to connect to the Redis cache used by the {EDAName} controller. + +Contains the key used by the {EDAName} controller to sync state with the {Gateway}. + +| `/etc/ansible-automation-platform/eda/server.cert` | SSL certificate for the {EDAName} controller web service. + +A self-signed certificate is installed by default, although a user-provided certificate and key pair can be used. + +For more information, see xref:proc-install-user-pki_{context}[Installing with user-provided PKI certificates]. + +| `/etc/ansible-automation-platform/eda/server.key` | SSL key for the {EDAName} controller web service. + +A self-signed certificate is installed by default, although a user-provided certificate and key pair can be used. + +For more information, see xref:proc-install-user-pki_{context}[Installing with user-provided PKI certificates].
+ +| `/etc/ansible-automation-platform/eda/cache.cert` | SSL certificate used for mutual TLS (mTLS) authentication with the Redis cache used by the {EDAName} controller. + +| `/etc/ansible-automation-platform/eda/cache.key` | SSL key used for mutual TLS (mTLS) authentication with the Redis cache used by the {EDAName} controller. + +| `/etc/ansible-automation-platform/eda/websocket.cert` | SSL certificate for the {EDAName} controller websocket endpoint. + +A self-signed certificate is installed by default, although a user-provided certificate and key pair can be used. + +For more information, see xref:proc-install-user-pki_{context}[Installing with user-provided PKI certificates]. + +| `/etc/ansible-automation-platform/eda/websocket.key` | SSL key for the {EDAName} controller websocket endpoint. + +A self-signed certificate is installed by default, although a user-provided certificate and key pair can be used. + +For more information, see xref:proc-install-user-pki_{context}[Installing with user-provided PKI certificates]. + +2+| *Managed CA secrets* +| *File path* | *Description* +| `/etc/ansible-automation-platform/ca/ansible-automation-platform-managed-ca-cert.crt` | SSL certificate for the internal self-signed certificate authority used by the installer to generate the default self-signed certificates for each component service. + +| `/etc/ansible-automation-platform/ca/ansible-automation-platform-managed-ca-cert.key` | SSL key for the internal self-signed certificate authority used by the installer to generate the default self-signed certificates for each component service. +|=== + +[NOTE] +==== +Some of these file locations reflect the previous product name of {ControllerName}, formerly named Ansible Tower. +==== + +== Container-based installation secrets + +The secrets listed for RPM-based installations are also used in container-based installations, but they are stored in a different manner. +Container-based installations of {PlatformName} use Podman secrets to store operational secrets. +These secrets can be listed using the `podman secret list` command. + +By default, Podman stores data in the home directory of the user who installed and runs the containerized {PlatformName} services. +Podman secrets are stored in the file `$HOME/.local/share/containers/storage/secrets/filedriver/secretsdata.json` as base64-encoded strings, so while they are not stored in plain text, the values are only obfuscated, not encrypted. + +The data stored in a Podman secret can be shown using the command `podman secret inspect --showsecret <secret_name>`. + +This file should be routinely monitored to ensure there has been no unauthorized access or modification. + + + diff --git a/downstream/modules/aap-hardening/ref-architecture.adoc b/downstream/modules/aap-hardening/ref-architecture.adoc index c171814398..65dc3292e7 100644 --- a/downstream/modules/aap-hardening/ref-architecture.adoc +++ b/downstream/modules/aap-hardening/ref-architecture.adoc @@ -3,15 +3,24 @@ [id="ref-architecture_{context}"] -= {PlatformNameShort} reference architecture += {PlatformNameShort} deployment topologies [role="_abstract"] -For large-scale production environments with availability requirements, this guide recommends deploying the components described in section 2.1 of this guide using the instructions in the xref:ref-architecture_{context}[reference architecture] documentation for {PlatformName} on {RHEL}.
While some variation may make sense for your specific technical requirements, following the reference architecture results in a supported production-ready environment. +Install {PlatformNameShort} {PlatformVers} based on one of the documented and tested deployment reference architectures defined in link:{LinkTopologies}. +Enterprise organizations should use one of the enterprise reference architectures for production deployments to ensure the highest level of uptime, performance, and continued scalability. +Organizations or deployments that are resource-constrained can use a "growth" reference architecture. +Review the {TitleTopologies} document to determine the reference architecture that best suits your requirements. +The chosen reference architecture includes planning information such as an architecture diagram, the number of {RHEL} servers required, the network ports and protocols used by the deployment, and load balancer information for enterprise architectures. + +It is possible to install {PlatformNameShort} on different infrastructure reference architectures and with different environment configurations. +Red Hat does not fully test architectures outside of published deployment models. + +The following diagram is a tested container enterprise architecture: .Reference architecture overview -image::aap-ref-architecture-322.png[Reference architecture for an example setup of an {PlatformNameShort} deployment for large scale production environments] +image::cont-b-env-a.png[Infrastructure reference architecture that Red Hat has tested that customers can use when self-managing {PlatformNameShort}] -{EDAName} is a new feature of {PlatformNameShort} {PlatformVers} that was not available when the reference architecture detailed in Figure 1: Reference architecture overview was originally written. Currently, the supported configuration is a single {ControllerName}, single {HubName}, and single {EDAController} node with external (installer managed) database. For an organization interested in {EDAName}, the recommendation is to install according to the configuration documented in the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/assembly-platform-install-scenario#ref-single-controller-hub-eda-with-managed-db[{PlatformNameShort} Installation Guide]. This document provides additional clarifications when {EDAName} specific hardening configuration is required. +//{EDAName} is a new feature of {PlatformNameShort} {PlatformVers} that was not available when the reference architecture detailed in Figure 1: Reference architecture overview was originally written. Currently, the supported configuration is a single {ControllerName}, single {HubName}, and single {EDAController} node with external (installer managed) database. For an organization interested in {EDAName}, the recommendation is to install according to the configuration documented in the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/assembly-platform-install-scenario#ref-single-controller-hub-eda-with-managed-db[{PlatformNameShort} Installation Guide]. This document provides additional clarifications when {EDAName} specific hardening configuration is required.
-For smaller production deployments where the full reference architecture may not be needed, this guide recommends deploying {PlatformNameShort} with a dedicated PostgreSQL database server whether managed by the installer or provided externally. +//For smaller production deployments where the full reference architecture may not be needed, this guide recommends deploying {PlatformNameShort} with a dedicated PostgreSQL database server whether managed by the installer or provided externally. diff --git a/downstream/modules/aap-hardening/ref-automation-controller-authentication.adoc b/downstream/modules/aap-hardening/ref-automation-controller-authentication.adoc index be7e3266fa..84ef394919 100644 --- a/downstream/modules/aap-hardening/ref-automation-controller-authentication.adoc +++ b/downstream/modules/aap-hardening/ref-automation-controller-authentication.adoc @@ -9,7 +9,7 @@ {ControllerNameStart} currently supports the following external authentication mechanisms: -* Azure Activity Directory +* {MSEntraID}, formerly known as {Azure} Active Directory * GitHub single sign-on * Google OAuth2 single sign-in * LDAP @@ -18,9 +18,6 @@ * TACACS+ * Generic OIDC -Choose an authentication mechanism that adheres to your organization's authentication policies, and refer to the link:https://docs.ansible.com/automation-controller/latest/html/administration/configure_tower_in_tower.html#authentication[Controller Configuration - Authentication] documentation to understand the prerequisites for the relevant authentication mechanism. The authentication mechanism used must ensure that the authentication-related traffic between {PlatformNameShort} and the authentication back-end is encrypted when the traffic occurs on a public or non-secure network (for example, LDAPS or LDAP over TLS, HTTPS for OAuth2 and SAML providers, etc.). +Choose an authentication mechanism that adheres to your organization's authentication policies, and see the link:https://docs.ansible.com/automation-controller/latest/html/administration/configure_tower_in_tower.html#authentication[Controller Configuration - Authentication] documentation to understand the prerequisites for the relevant authentication mechanism. The authentication mechanism used must ensure that the authentication-related traffic between {PlatformNameShort} and the authentication back-end is encrypted when the traffic occurs on a public or insecure network (for example, LDAPS or LDAP over TLS, HTTPS for OAuth2 and SAML providers, etc.). In {ControllerName}, any “system administrator” account can edit, change, and update any inventory or automation definition. Restrict these account privileges to the minimum set of users possible for low-level {ControllerName} configuration and disaster recovery. 
- - - diff --git a/downstream/modules/aap-hardening/ref-automation-controller-operational-secrets.adoc b/downstream/modules/aap-hardening/ref-automation-controller-operational-secrets.adoc deleted file mode 100644 index 5361e59674..0000000000 --- a/downstream/modules/aap-hardening/ref-automation-controller-operational-secrets.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// downstream/assemblies/assembly-hardening-aap.adoc - -[id="ref-automation-controller-operational-secrets_{context}"] - -= {ControllerNameStart} operational secrets - -[role="abstract"] - -{ControllerNameStart} contains the following secrets used operationally: - -.{ControllerNameStart} operational secrets -|=== -| *File* | *Details* -| `/etc/tower/SECRET_KEY` | A secret key used for encrypting automation secrets in the database. If the `SECRET_KEY` changes or is unknown, no encrypted fields in the database will be accessible. - -| `/etc/tower/tower.cert` - -`/etc/tower/tower.key` | SSL certificate and key for the {ControllerName} web service. A self-signed `cert/key` is installed by default; you can provide a locally appropriate certificate and key (see xref:proc-install-user-pki_{context}[Installing with user-provided PKI certificates] for more information). - -| `/etc/tower/conf.d/postgres.py` | Contains the password used by the {ControllerName} to connect to the database. - -| `/etc/tower/conf.d/channels.py` | Contains the secret used by the {ControllerName} for websocket broadcasts. -|=== - -These secrets are stored unencrypted on the {ControllerNameStart} server, as the {ControllerName} service must read them all in an automated fashion at startup. All files are protected by Unix permissions, and restricted to the root user or the {ControllerName} service user awx. These files should be routinely monitored to ensure there has been no unauthorized access or modification. - -[NOTE] -==== -{ControllerNameStart} was formerly named Ansible Tower. These file locations retain the previous product name. -==== diff --git a/downstream/modules/aap-hardening/ref-complex-patching-scenarios.adoc b/downstream/modules/aap-hardening/ref-complex-patching-scenarios.adoc new file mode 100644 index 0000000000..9c12df076f --- /dev/null +++ b/downstream/modules/aap-hardening/ref-complex-patching-scenarios.adoc @@ -0,0 +1,21 @@ +[id="ref-complex-patching-scenarios"] + += Complex patching scenarios + +In {PlatformNameShort}, multiple automation jobs can be chained together into workflows, which can be used to coordinate multiple steps in a complex patching scenario. + +The following example of a complex patching scenario demonstrates taking virtual machine snapshots, patching the virtual machines, and creating tickets when an error is encountered in the workflow. Each step runs as a separate job template in the workflow; a sketch of the snapshot step follows the list. + +. Run a project sync to ensure the latest playbooks are available. In parallel, run an inventory sync to make sure the latest list of target hosts is available. +. Take a snapshot of each target host. +.. If the snapshot task fails, submit a ticket with the relevant information. +. Patch each of the target hosts. +.. If the patching task fails, restore the snapshot and submit a ticket with the relevant information. +. Delete each snapshot where the patching task was successful.
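+
+The following sketch illustrates how the snapshot step (step 2) might be implemented as a job template playbook.
+It is an illustration only: it assumes VMware virtual machines, the `community.vmware` collection, and vCenter connection details (`vcenter_hostname`, `vcenter_username`, `vcenter_password`, `vcenter_datacenter`, and `vm_folder`) supplied as job template variables or credentials.
+
+----
+- name: Take a pre-patching snapshot of each target host
+  hosts: target_hosts
+  gather_facts: false
+
+  tasks:
+    - name: Create a snapshot of the virtual machine
+      community.vmware.vmware_guest_snapshot:
+        hostname: "{{ vcenter_hostname }}"
+        username: "{{ vcenter_username }}"
+        password: "{{ vcenter_password }}"
+        datacenter: "{{ vcenter_datacenter }}"
+        folder: "{{ vm_folder }}"
+        name: "{{ inventory_hostname }}"
+        state: present
+        snapshot_name: "pre-patching"
+        description: "Snapshot taken before applying patches"
+      # The snapshot is created through the vCenter API, not on the guest
+      delegate_to: localhost
+----
+
+The patching and snapshot-deletion steps follow the same pattern as separate job templates, and the failure paths submit tickets through your ticketing system's API or collection.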
+ +The following workflow visualization shows how the components of the example complex patching scenario are executed: + +image::workflow.png[Workflow representation] + +.Additional resources +For more information on workflows, see link:{URLControllerUserGuide}/controller-workflows[Workflows in automation controller]. \ No newline at end of file diff --git a/downstream/modules/aap-hardening/ref-dns-load-balancing.adoc b/downstream/modules/aap-hardening/ref-dns-load-balancing.adoc index 7690611d70..2b82a8d995 100644 --- a/downstream/modules/aap-hardening/ref-dns-load-balancing.adoc +++ b/downstream/modules/aap-hardening/ref-dns-load-balancing.adoc @@ -7,22 +7,6 @@ [role="_abstract"] -When using a load balancer with {PlatformNameShort} as described in the reference architecture, an additional FQDN is needed for each load-balanced component ({ControllerName} and {PrivateHubName}). +When using a load balancer with {PlatformNameShort} as described in the deployment topology, an additional FQDN is needed for the load balancer. +For example, an FQDN such as `aap.example.com` might be used for the load balancer, which in turn directs traffic to each of the {Gateway} components defined in the installation inventory. -For example, if the following hosts are defined in the {PlatformNameShort} installer inventory file: - ----- -[automationcontroller] -controller0.example.com -controller1.example.com -controller2.example.com - -[automationhub] -hub0.example.com -hub1.example.com -hub2.example.com ----- - -Then the load balancer can use the FQDNs `controller.example.com` and `hub.example.com` for the user-facing name of these {PlatformNameShort} services. - -When a load balancer is used in front of the {PrivateHubName}, the installer must be aware of the load balancer FQDN. Before installing {PlatformNameShort}, in the installation inventory file set the `automationhub_main_url` variable to the FQDN of the load balancer. For example, to match the previous example, you would set the variable to `automationhub_main_url = hub.example.com`. diff --git a/downstream/modules/aap-hardening/ref-dns.adoc b/downstream/modules/aap-hardening/ref-dns.adoc index e256ae0797..0392eb8a69 100644 --- a/downstream/modules/aap-hardening/ref-dns.adoc +++ b/downstream/modules/aap-hardening/ref-dns.adoc @@ -5,4 +5,5 @@ [role="_abstract"] -When installing {PlatformNameShort}, the installer script checks that certain infrastructure servers are defined with a Fully Qualified Domain Name (FQDN) in the installer inventory. This guide recommends that all {PlatformNameShort} infrastructure nodes have a valid FQDN defined in DNS which resolves to a routable IP address, and that these FQDNs be used in the installer inventory file. \ No newline at end of file +When installing {PlatformNameShort}, the installer script checks that certain infrastructure servers are defined with a _Fully Qualified Domain Name_ (FQDN) in the installer inventory. +This guide recommends that all {PlatformNameShort} infrastructure nodes have a valid FQDN defined in DNS that resolves to a routable IP address, and that these FQDNs be used in the installer inventory file.
\ No newline at end of file diff --git a/downstream/modules/aap-hardening/ref-infrastructure-as-code.adoc b/downstream/modules/aap-hardening/ref-infrastructure-as-code.adoc index 1b8f700125..c9f482b51f 100644 --- a/downstream/modules/aap-hardening/ref-infrastructure-as-code.adoc +++ b/downstream/modules/aap-hardening/ref-infrastructure-as-code.adoc @@ -3,11 +3,13 @@ [id="ref-infrastructure-as-code_{context}"] -= Use infrastructure as code paradigm += Use a configuration as code paradigm [role="_abstract"] -The Red Hat Community of Practice has created a set of automation content available via collections to manage {PlatformNameShort} infrastructure and configuration as code. This enables automation of the platform itself through Infrastructure as Code (IaC) or Configuration as Code (CaC). While many of the benefits of this approach are clear, there are critical security implications to consider. +The Red Hat Community of Practice has created a set of automation content available through collections to manage {PlatformNameShort} infrastructure and configuration as code. +This enables automation of the platform itself through _Configuration as Code_ (CaC). +While many of the benefits of this approach are clear, there are security implications to consider. The following Ansible content collections are available for managing {PlatformNameShort} components using an infrastructure as code methodology, all of which are found on the link:https://console.redhat.com/ansible/automation-hub[Ansible Automation Hub]: @@ -16,21 +18,25 @@ The following Ansible content collections are available for managing {PlatformNa | *Validated Collection* | *Collection Purpose* | `infra.aap_utilities` | Ansible content for automating day 1 and day 2 operations of {PlatformNameShort}, including installation, backup and restore, certificate management, and more. -| `infra.controller_configuration` | A collection of roles to manage {ControllerName} components, including managing users and groups (RBAC), projects, job templates and workflows, credentials, and more. - -| `infra.ah_configuration` | Ansible content for interacting with {HubName}, including users and groups (RBAC), collection upload and management, collection approval, managing the {ExecEnvShort} image registry, and more. +| `infra.aap_configuration` | A collection of roles to manage the creation of {PlatformNameShort} components, including users and groups (RBAC), projects, job templates and workflows, credentials, and more. This collection includes functionality from the older `infra.controller_configuration`, `infra.ah_configuration`, and `infra.eda_configuration` collections and should be used in their place with {PlatformNameShort} {PlatformVers}. | `infra.ee_utilities` | A collection of roles for creating and managing {ExecEnvShort} images, or migrating from the older Tower virtualenvs to execution environments. |=== -Many organizations use CI/CD platforms to configure pipelines or other methods to manage this type of infrastructure. However, using {PlatformNameShort} natively, a webhook can be configured to link a Git-based repository natively. In this way, Ansible can respond to git events to trigger Job Templates directly. This removes the need for external CI components from this overall process and thus reduces the attack surface. +Many organizations use CI/CD platforms to configure pipelines or other methods to manage this type of infrastructure. +However, using {PlatformNameShort} natively, a webhook can be configured to link a Git-based repository.
+In this way, Ansible can respond to Git events to trigger job templates directly. +This removes the need for external CI components from this overall process and thus reduces the attack surface. -These practices allow version control of all infrastructure and configuration. Apply Git best practices to ensure proper code quality inspection prior to being synchronized into {PlatformNameShort}. Relevant Git best practices include the following: +These practices enable version control of all infrastructure and configuration. +Apply Git best practices to ensure proper code quality inspection before content is synchronized into {PlatformNameShort}. Relevant Git best practices include the following: * Creating pull requests. * Ensuring that inspection tools are in place. * Ensuring that no plain text secrets are committed. * Ensuring that pre-commit hooks and any other policies are followed. -IaC also encourages using external vault systems which removes the need to store any sensitive data in the repository, or deal with having to individually vault files as needed. For more information on using external vault systems, see section xref:con-external-credential-vault_{context}[2.3.2.3 External credential vault considerations] within this guide. +CaC also encourages the use of external vault systems, which removes the need to store sensitive data in the repository or to vault individual files as needed. +This is particularly important when storing {PlatformNameShort} configuration in a source code repository, as {ControllerName} credentials and {EDAName} credentials must be provided to the collection variables in plain text, which should not be committed to a source repository. +For more information on using external vault systems, see the xref:con-external-credential-vault_{context}[External credential vault considerations] section in this guide. diff --git a/downstream/modules/aap-hardening/ref-initial-configuration.adoc b/downstream/modules/aap-hardening/ref-initial-configuration.adoc index 697b7fa705..f039b974b1 100644 --- a/downstream/modules/aap-hardening/ref-initial-configuration.adoc +++ b/downstream/modules/aap-hardening/ref-initial-configuration.adoc @@ -7,9 +7,16 @@ [role="_abstract"] -Granting access to certain parts of the system exposes security vulnerabilities. Apply the following practices to help secure access: +Granting access to certain parts of the system exposes security vulnerabilities. +Apply the following practices to help secure access: -* Minimize access to system administrative accounts. There is a difference between the user interface (web interface) and access to the operating system that the {ControllerName} is running on. A system administrator or root user can access, edit, and disrupt any system application. Anyone with root access to the controller has the potential ability to decrypt those credentials, and so minimizing access to system administrative accounts is crucial for maintaining a secure system. -* Minimize local system access. {ControllerNameStart} should not require local user access except for administrative purposes. Non-administrator users should not have access to the controller system. -* Enforce separation of duties. Different components of automation may need to access a system at different levels. Use different keys or credentials for each component so that the effect of any one key or credential vulnerability is minimized.
-* Restrict {ControllerName} to the minimum set of users possible for low-level controller configuration and disaster recovery only. In a controller context, any controller ‘system administrator’ or ‘superuser’ account can edit, change, and update any inventory or automation definition in the controller. \ No newline at end of file +* Minimize access to system administrative accounts. +There is a difference between the user interface (web interface) and access to the operating system that the {ControllerName} is running on. +A system administrator or superuser can access, edit, and disrupt any system application. +Anyone with root access to {ControllerName} has the potential ability to decrypt the credentials stored there, and so minimizing access to system administrative accounts is crucial for maintaining a secure system. +* Minimize local system access. {ControllerNameStart} should not require local user access except for administrative purposes. +Non-administrator users should not have access to the {ControllerName} system. +* Enforce separation of duties. +Different components of automation might need to access a system at different levels. +Use different keys or credentials for each component so that the effect of any one key or credential vulnerability is minimized. +* Restrict {ControllerName} to the minimum set of users possible for low-level {ControllerName} configuration and disaster recovery only. In an {ControllerName} context, any {ControllerName} ‘system administrator’ or ‘superuser’ account can edit, change, and update any inventory or automation definition in {ControllerName}. \ No newline at end of file diff --git a/downstream/modules/aap-hardening/ref-install-security-updates.adoc b/downstream/modules/aap-hardening/ref-install-security-updates.adoc new file mode 100644 index 0000000000..87f2e3a92c --- /dev/null +++ b/downstream/modules/aap-hardening/ref-install-security-updates.adoc @@ -0,0 +1,18 @@ +[id="ref-install-security-updates"] + += Installing security updates only + +For organizations with a policy requiring that all RPMs with security errata be kept up to date, the following playbook might be used in a regularly scheduled job template. + +---- +- name: Install all security-related RPM updates + hosts: target_hosts + become: true + + tasks: + - name: Install latest RPMs with security errata + ansible.builtin.dnf: + name: '*' + security: true + state: latest +---- \ No newline at end of file diff --git a/downstream/modules/aap-hardening/ref-interactive-session-timeout.adoc b/downstream/modules/aap-hardening/ref-interactive-session-timeout.adoc new file mode 100644 index 0000000000..1dec2e32d9 --- /dev/null +++ b/downstream/modules/aap-hardening/ref-interactive-session-timeout.adoc @@ -0,0 +1,24 @@ +[id="ref-interactive-session-timeout"] + += Interactive session timeout + +A compliance profile might require that an interactive session timeout be enforced. +For example, the DISA STIG requires that all users be automatically logged out after 15 minutes of inactivity. +The installation process often requires an hour or more to complete, and this control can stop the installation process and log out the user before installation is complete. +The same also applies to day-two operations such as backup and restore, which in production environments often take longer than the recommended interactive session timeout. +During these operations, increase the interactive session timeout to ensure the operation is successful.
+ +There are multiple ways in which this control can be enforced, including shell timeout variables, setting the idle session timeout for `systemd-logind`, or setting SSH connection timeouts, and different compliance profiles can use one or more of these methods. +The one that most often interrupts the installation and day-two operations is the idle session timeout for `systemd-logind`, which was introduced in the DISA STIG version V2R1 ({RHEL} 8) and V2R2 ({RHEL} 9). To increase the idle session timeout for `systemd-logind`, as the root user: + +* Edit the file `/etc/systemd/logind.conf`. +* If the `StopIdleSessionSec` setting is set to zero, no change is needed. +* If the `StopIdleSessionSec` setting is non-zero, this indicates that the session will be terminated after that number of seconds. ++ +Set `StopIdleSessionSec` to a larger value, for example `StopIdleSessionSec=7200`, to increase the timeout, then run `systemctl restart systemd-logind` to apply the change. +* Log out of the interactive session entirely and log back in to ensure the new setting applies to the current login session. + +[NOTE] +==== +This change only needs to be made on the installation host, or if an installation host is not used, the host where the {PlatformNameShort} installer is run. +==== \ No newline at end of file diff --git a/downstream/modules/aap-hardening/ref-keep-up-to-date.adoc b/downstream/modules/aap-hardening/ref-keep-up-to-date.adoc new file mode 100644 index 0000000000..d3a56bde26 --- /dev/null +++ b/downstream/modules/aap-hardening/ref-keep-up-to-date.adoc @@ -0,0 +1,18 @@ +[id="ref-keep-up-to-date"] + += Keeping everything up to date + +For some {RHEL} servers, such as a lab or other non-production systems, you might want to install all available patches on a regular cadence. +The following example playbook might be used in a job template that is scheduled to run weekly, and updates the system with all of the latest RPMs. + +---- +- name: Install all available RPM updates + hosts: target_hosts + become: true + + tasks: + - name: Install latest RPMs + ansible.builtin.dnf: + name: '*' + state: latest +---- \ No newline at end of file diff --git a/downstream/modules/aap-hardening/ref-ntp.adoc b/downstream/modules/aap-hardening/ref-ntp.adoc index 54e86ce600..360fa1b4b4 100644 --- a/downstream/modules/aap-hardening/ref-ntp.adoc +++ b/downstream/modules/aap-hardening/ref-ntp.adoc @@ -7,6 +7,8 @@ [role="_abstract"] -Configure each server in the {PlatformNameShort} infrastructure to synchronize time with an NTP pool or your organization's NTP service. This ensures that logging and auditing events generated by {PlatformNameShort} have an accurate time stamp, and that any scheduled jobs running from the {ControllerName} execute at the correct time. +Configure each server in the {PlatformNameShort} infrastructure to synchronize time with a _Network Time Protocol_ (NTP) pool or your organization's NTP service. +This ensures that logging and auditing events generated by {PlatformNameShort} have an accurate time stamp, and that any scheduled jobs running from the {ControllerName} execute at the correct time. +This also helps prevent systems within {PlatformNameShort} from rejecting messages from one another because of timestamp mismatches. For information on configuring the chrony service for NTP synchronization, see link:{BaseURL}/red_hat_enterprise_linux/8/html/configuring_basic_system_settings/configuring-time-synchronization_configuring-basic-system-settings#using-chrony_configuring-time-synchronization[Using Chrony] in the {RHEL} documentation.
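+
+As a minimal illustration only, the following `/etc/chrony.conf` entry and commands enable time synchronization; the pool address shown is the {RHEL} default and should be replaced with your organization's NTP servers where they exist:
+
+----
+# In /etc/chrony.conf, define at least one time source:
+pool 2.rhel.pool.ntp.org iburst
+
+# Enable and start the chrony service, then verify the configured sources:
+# systemctl enable --now chronyd
+# chronyc sources
+----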
diff --git a/downstream/modules/aap-hardening/ref-security-variables-install-inventory.adoc b/downstream/modules/aap-hardening/ref-security-variables-install-inventory.adoc index ee96dc49a7..15bf54da55 100644 --- a/downstream/modules/aap-hardening/ref-security-variables-install-inventory.adoc +++ b/downstream/modules/aap-hardening/ref-security-variables-install-inventory.adoc @@ -7,29 +7,66 @@ [role="_abstract"] -The installation inventory file defines the architecture of the {PlatformNameShort} infrastructure, and provides a number of variables that can be used to modify the initial configuration of the infrastructure components. For more information on the installer inventory, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/red_hat_ansible_automation_platform_installation_guide/index#proc-editing-installer-inventory-file_platform-install-scenario[Ansible Automation Platform Installation Guide]. +The installation inventory file defines the architecture of the {PlatformNameShort} infrastructure and provides a number of variables that can be used to modify the initial configuration of the infrastructure components. For more information on the installer inventory, see link:{URLPlanningGuide}/about_the_installer_inventory_file[About the installer inventory file]. -The following table lists a number of security-relevant variables and their recommended values for creating the installation inventory. +The following table lists a number of security-relevant variables and their recommended values for an RPM-based deployment. .Security-relevant inventory variables +[cols="33%,33%,33%",options="header"] |=== -| *Variable* | *Recommended Value* | *Details* -| `postgres_use_ssl` | true | The installer configures the installer-managed Postgres database to accept SSL-based connections when this variable is set. +| *RPM deployment variable* | *Recommended Value* | *Details* + +| `postgres_use_ssl` | true | The installation program configures the installer-managed Postgres database to accept SSL-based connections when this variable is set. + +The default for this variable is `false`, which means SSL/TLS is not used for PostgreSQL connections. + +When set to `true`, the platform connects to PostgreSQL by using SSL/TLS. + +| `pg_sslmode` `automation_gateway_pg_sslmode` `automationhub_pg_sslmode` `automationcontroller_pg_sslmode` | verify-full | These variables control mutual TLS (mTLS) authentication to the database. +By default, when each service connects to the database, it tries an encrypted connection, but it is not enforced. + +Setting this variable to `verify-full` enforces an mTLS negotiation between the service and the database. +The `postgres_use_ssl` variable must also be set to `true` for this `pg_sslmode` to be effective. *NOTE*: If a third-party database is used instead of the installer-managed database, the third-party database must be set up independently to accept mTLS connections. -| `pg_sslmode` | verify-full | By default, when the controller connects to the database, it tries an encrypted connection, but it is not enforced. Setting this variable to "verify-full" requires a mutual TLS negotiation between the controller and the database. The `postgres_use_ssl` variable must also be set to "true" for this `pg_sslmode` to be effective. -| `nginx_disable_https` | false | If set to "true", this variable disables HTTPS connections to the controller.
The default is "false", so if this variable is absent from the installer inventory it is effectively the same as explicitly defining the variable to "false". +| `nginx_disable_hsts` `automation_gateway_disable_hsts` `automationhub_disable_hsts` `automationcontroller_disble_hsts` | false | If set to `true`, these variables disable HTTPS _strict transport Security_ (HSTS) connections to each of the component web services. -| `automationhub_disable_https` | false | If set to "true", this variable disables HTTPS connections to the {PrivateHubName}. The default is "false", so if this variable is absent from the installer inventory it is effectively the same as explicitly defining the variable to "false". +The default is `false`. If these variables are absent from the installer inventory it is effectively the same as defining the variables as `false`. +|=== + +The following table lists a number of security-relevant variables and their recommended values for a container-based deployment. -| `automationedacontroller_disable_https` | false | If set to "true", this variable disables HTTPS connections to the {EDAcontroller}. The default is "false", so if this variable is absent from the installer inventory it is effectively the same as explicitly defining the variable to "false". +.Security-relevant containerized inventory variables +[cols="33%,33%,33%",options="header"] |=== +| *Container deployment variable* | *Recommended Value* | *Details* +| `postgresql_disable_tls` | false | If set to `true`, this variable disables TLS connections to the installer-managed PostgreSQL database. + +The default is `false` + +If this variable is absent from the installer inventory, it is effectively the same as defining the variable as `false`. -In scenarios such as the reference architecture where a load balancer is used with multiple controllers or hubs, SSL client connections can be terminated at the load balancer or passed through to the individual {PlatformNameShort} servers. If SSL is being terminated at the load balancer, this guide recommends that the traffic gets re-encrypted from the load balancer to the individual {PlatformNameShort} servers, to ensure that end-to-end encryption is in use. In this scenario, the `*_disable_https` variables listed in Table 2.3 would remain the default value of "false". +| `controller_pg_sslmode` `gateway_pg_sslmode` `hub_pg_sslmode` `eda_pg_sslmode` | verify-full a| These variables control mutual TLS (mTLS) authentication to the database. + +By default, when each service connects to the database, it tries an encrypted connection, but it is not enforced. Setting this variable to `verify-full` enforces an mTLS negotiation between the service and the database. [NOTE] ==== -This guide recommends using an external database in production environments, but for development and testing scenarios the database could be co-located on the {ControllerName}. Due to current PostgreSQL 13 limitations, setting `pg_sslmode = verify-full` when the database is co-located on the {ControllerName} results in an error validating the host name during TLS negotiation. Until this issue is resolved, an external database must be used to ensure mutual TLS authentication between the {ControllerName} and the database. +If a third-party database is used instead of the installer-managed database, the third-party database must be set up independently to accept mTLS connections. 
==== + +| `controller_nginx_disable_https` `gateway_nginx_disable_https` `hub_nginx_disable_https` `eda_nginx_disable_https` | `false` | If set to `true`, these variables disable HTTPS connections to each of the component web services. +The default is `false`. + +If these variables are absent from the installer inventory, it is effectively the same as defining the variables as `false`. + +| `controller_nginx_disable_hsts` `gateway_nginx_disable_hsts` `hub_nginx_disable_hsts` `eda_nginx_disable_hsts` | `false` | If set to `true`, these variables disable _HTTP Strict Transport Security_ (HSTS) connections to each of the component web services. The default is `false`. If these variables are absent from the installer inventory, it is effectively the same as defining the variables as `false`. +|=== + + +In an enterprise topology where a load balancer is used in front of multiple {Gateway}s, SSL client connections can be terminated at the load balancer or passed through to the individual {PlatformNameShort} servers. +If SSL is being terminated at the load balancer, this guide recommends that the traffic is re-encrypted from the load balancer to the individual {PlatformNameShort} servers. +This ensures that end-to-end encryption is in use. +In this scenario, the `*_disable_https` variables listed are set to the default value of `false`. + diff --git a/downstream/modules/aap-hardening/ref-sensitive-variables-install-inventory.adoc b/downstream/modules/aap-hardening/ref-sensitive-variables-install-inventory.adoc index 60a08b2883..e239dd8e3b 100644 --- a/downstream/modules/aap-hardening/ref-sensitive-variables-install-inventory.adoc +++ b/downstream/modules/aap-hardening/ref-sensitive-variables-install-inventory.adoc @@ -9,21 +9,26 @@ The installation inventory file contains a number of sensitive variables, mainly those used to set the initial passwords used by {PlatformNameShort}, that are normally kept in plain text in the inventory file. To prevent unauthorized viewing of these variables, you can keep these variables in an encrypted link:https://docs.ansible.com/ansible/latest/vault_guide/index.html[Ansible vault]. To do this, go to the installer directory and create a vault file: -* `cd /path/to/ansible-automation-platform-setup-bundle-2.4-1-x86_64` +* `cd /path/to/ansible-automation-platform-setup-bundle-2.5-1-x86_64` * `ansible-vault create vault.yml` -You will be prompted for a password to the new Ansible vault. Do not lose the vault password because it is required every time you need to access the vault file, including during day-two operations and performing backup procedures. You can secure the vault password by storing it in an encrypted password manager or in accordance with your organizational policy for storing passwords securely. +You are prompted for a password to the new Ansible vault. +Do not lose the vault password because it is required every time you need to access the vault file, including during day-two operations and performing backup procedures. +You can secure the vault password by storing it in an encrypted password manager or in accordance with your organizational policy for storing passwords securely.
Add the sensitive variables to the vault, for example: +//Added containerized variables RPM/containerized: + ---- -admin_password: -pg_password: -automationhub_admin_password: -automationhub_pg_password: -automationhub_ldap_bind_password: -automationedacontroller_admin_password: -automationedacontroller_pg_password: +admin_password/controller_admin_password: +pg_password/controller_pg_password: +automationhub_admin_password/hub_admin_password: +automationhub_pg_password/hub_pg_password: +automationedacontroller_admin_password/eda_admin_password: +automationedacontroller_pg_password/eda_pg_password: +-/gateway_admin_password: +-/gateway_pg_password: ---- Make sure these variables are not also present in the installation inventory file. To use the new Ansible vault with the installer, run it with the command `./setup.sh -e @vault.yml -- --ask-vault-pass`. diff --git a/downstream/modules/aap-hardening/ref-specify-package-versions.adoc b/downstream/modules/aap-hardening/ref-specify-package-versions.adoc new file mode 100644 index 0000000000..416d387aaf --- /dev/null +++ b/downstream/modules/aap-hardening/ref-specify-package-versions.adoc @@ -0,0 +1,42 @@ +[id="ref-specify-package-versions"] + += Specifying package versions + +For production systems, a well-established configuration management practice is to deploy only known, tested combinations of software to ensure that systems are configured correctly and perform as expected. +This includes deploying only known versions of operating system software and patches to ensure that system updates do not introduce problems with production applications. + +[NOTE] +==== +The following example playbook installs a specific version of the `httpd` RPM and its dependencies when the target host uses the {RHEL} 9 operating system. +This playbook does not take action if the specified versions are already in place or if a different version of {RHEL} is installed. +==== +---- +- name: Install specific RPM versions + hosts: target_hosts + gather_facts: true + become: true + + vars: + httpd_packages_rhel9: + - httpd-2.4.53-11.el9_2.5 + - httpd-core-2.4.53-11.el9_2.5 + - httpd-filesystem-2.4.53-11.el9_2.5 + - httpd-tools-2.4.53-11.el9_2.5 + - mod_http2-1.15.19-4.el9_2.4 + - mod_lua-2.4.53-11.el9_2.5 + + tasks: + - name: Install httpd and dependencies + ansible.builtin.dnf: + name: '{{ httpd_packages_rhel9 }}' + state: present + allow_downgrade: true + when: + - ansible_distribution == "RedHat" + - ansible_distribution_major_version == "9" +---- + +[NOTE] +==== +When `allow_downgrade: true` is set, if a newer version of any defined package is installed on the system, it is downgraded to the specified version instead. +==== \ No newline at end of file diff --git a/downstream/modules/aap-hardening/ref-sudo-nopasswd.adoc b/downstream/modules/aap-hardening/ref-sudo-nopasswd.adoc index 24064f37bd..76da91a5b5 100644 --- a/downstream/modules/aap-hardening/ref-sudo-nopasswd.adoc +++ b/downstream/modules/aap-hardening/ref-sudo-nopasswd.adoc @@ -5,9 +5,21 @@ = Sudo and NOPASSWD -[role="_abstract"] +A compliance profile might require that all users with sudo privileges provide a password (that is, the `NOPASSWD` directive must not be used in a sudoers file). +The {PlatformNameShort} installer runs many tasks as a privileged user, and by default expects to be able to elevate privileges without a password.
+To provide a password to the installer for elevating privileges, append the following options when launching the RPM installer script: -The {RHEL} 8 STIG requires that all users with sudo privileges must provide a password (that is, the "NOPASSWD" directive must not be used in a sudoers file). The {PlatformNameShort} installer runs many tasks as a privileged user, and by default expects to be able to elevate privileges without a password. To provide a password to the installer for elevating privileges, append the following options when launching the installer script: `./setup.sh -- –-ask-become-pass`. +`./setup.sh -- --ask-become-pass`. + +For the container-based installer: + +`ansible-playbook ansible.containerized_installer.install --ask-become-pass` + +When the installer is run, you are prompted for the user's password to elevate privileges. + +[NOTE] +==== +Using the `--ask-become-pass` option also applies when running the installer for day-two operations such as backup and restore. +==== -This also applies when running the installer script for day-two operations such as backup and restore. diff --git a/downstream/modules/analytics/con-jobs-explorer.adoc b/downstream/modules/analytics/con-jobs-explorer.adoc index e412928e5f..d1d5a98656 100644 --- a/downstream/modules/analytics/con-jobs-explorer.adoc +++ b/downstream/modules/analytics/con-jobs-explorer.adoc @@ -35,4 +35,4 @@ You can click on the arrow icon next to the job *Id/Name* column to view more de == Reviewing job details on {ControllerName} -Click the job in the *Id/Name* column to view the job itself on the {ControllerName} job details page. For more information on viewing job details on {ControllerName}, see _Jobs_ in the {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-jobs[Automation Controller User Guide]. +Click the job in the *Id/Name* column to view the job itself on the {ControllerName} job details page. For more information on job settings for {ControllerName}, see _Jobs in automation controller_ in the {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/using_automation_execution/controller-jobs[{TitleControllerUserGuide}]. \ No newline at end of file diff --git a/downstream/modules/analytics/con-review-savings-calculations.adoc b/downstream/modules/analytics/con-review-savings-calculations.adoc index 29d3addccd..6e72f8a094 100644 --- a/downstream/modules/analytics/con-review-savings-calculations.adoc +++ b/downstream/modules/analytics/con-review-savings-calculations.adoc @@ -6,7 +6,7 @@ = Review savings calculations for your automation plans -The {planner} offers a calculation of how much time and money you can save by automating a job. {InsightsName} takes data from the plan details and the associated job template to provide you with an accurate projection of your cost savings when you complete this savings plan. +The {planner} offers a calculation of how much time and money you can save by automating a job. Automation analytics takes data from the plan details and the associated job template to provide you with an accurate projection of your cost savings when you complete this savings plan. To do so, navigate to your savings planner page, click the name of an existing plan, then navigate to the *Statistics* tab.
diff --git a/downstream/modules/analytics/proc-ignoring-nested-workflows-jobs.adoc b/downstream/modules/analytics/proc-ignoring-nested-workflows-jobs.adoc
index 3573e9ae6e..934b2fea97 100644
--- a/downstream/modules/analytics/proc-ignoring-nested-workflows-jobs.adoc
+++ b/downstream/modules/analytics/proc-ignoring-nested-workflows-jobs.adoc
@@ -12,5 +12,5 @@ Select the settings icon on the *Job Explorer* view and use the toggle switch to
 Nested workflows allow you to create workflow job templates that call other workflow job templates. Nested workflows promote reuse, as modular components, of workflows that include existing business logic and organizational requirements in automating complex processes and operations.
 
-To learn more about nested workflows, see _Workflows_ in the {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-workflows[Automation Controller User Guide].
+To learn more about nested workflows, see Workflows in automation controller in the {BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/using_automation_execution/controller-workflows[{TitleControllerUserGuide}].
 ====
diff --git a/downstream/modules/analytics/proc-link-plan-job-template.adoc b/downstream/modules/analytics/proc-link-plan-job-template.adoc
index 47d7018e2d..5ee526ee80 100644
--- a/downstream/modules/analytics/proc-link-plan-job-template.adoc
+++ b/downstream/modules/analytics/proc-link-plan-job-template.adoc
@@ -6,7 +6,7 @@
 
 = Link a savings plan to a job template
 
-You can associate a job template to a savings plan to allow {InsightsShort} to provide a more accurate time and cost savings estimate for completing this savings plan.
+You can associate a job template with a savings plan to allow automation analytics to provide a more accurate time and cost savings estimate for completing this savings plan.
 
 .Procedure
 . From the navigation panel, select {MenuAASavingsPlanner}.
diff --git a/downstream/modules/analytics/proc-review-reports.adoc b/downstream/modules/analytics/proc-review-reports.adoc
index 193155c99d..5d60d1b884 100644
--- a/downstream/modules/analytics/proc-review-reports.adoc
+++ b/downstream/modules/analytics/proc-review-reports.adoc
@@ -15,4 +15,4 @@ To view reports about your Ansible automation environment, proceed with the foll
 
 Each report presents data to monitor your Ansible automation environment. Use the filter toolbar on each report to adjust your graph view.
 
-NOTE: We are constantly adding new reports to the system. If you have ideas for new reports that would be helpful for your team, please contact your account representative or log a feature enhancement for {InsightsShort}.
+NOTE: We are constantly adding new reports to the system. If you have ideas for new reports that would be helpful for your team, please contact your account representative or log a feature enhancement for automation analytics.
diff --git a/downstream/modules/builder/con-about-ee.adoc b/downstream/modules/builder/con-about-ee.adoc
index 71d00bcacf..add864b5a4 100644
--- a/downstream/modules/builder/con-about-ee.adoc
+++ b/downstream/modules/builder/con-about-ee.adoc
@@ -9,8 +9,8 @@
 All automation in {PlatformName} runs on container images called {ExecEnvName}.
An {ExecEnvNameSing} should contain the following:
 
-* Ansible Core 2.15 or later
-* Python 3.8-3.11
+* Ansible Core 2.16 or later
+* Python 3.10 or later
 * {Runner}
 * Ansible content collections and their dependencies
 * System dependencies
diff --git a/downstream/modules/builder/con-ee-precedence.adoc b/downstream/modules/builder/con-ee-precedence.adoc
index cf24802f1a..9311af6041 100644
--- a/downstream/modules/builder/con-ee-precedence.adoc
+++ b/downstream/modules/builder/con-ee-precedence.adoc
@@ -8,7 +8,7 @@ Project updates will always use the control plane {ExecEnvName} by default, howe
 . The `default_environment` defined on the project that the job uses.
 . The `default_environment` defined on the organization of the job.
 . The `default_environment` defined on the organization of the inventory the job uses.
-. The current `DEFAULT_EXECUTION_ENVIRONMENT` setting (configurable at `api/v2/settings/jobs/`)
+. The current `DEFAULT_EXECUTION_ENVIRONMENT` setting (configurable at `api/v2/settings/system/`)
 . Any image from the `GLOBAL_JOB_EXECUTION_ENVIRONMENTS` setting.
 . Any other global {ExecEnvShort}.
diff --git a/downstream/modules/builder/proc-customize-ee-image.adoc b/downstream/modules/builder/proc-customize-ee-image.adoc
index b0c17b6727..aa879999d7 100644
--- a/downstream/modules/builder/proc-customize-ee-image.adoc
+++ b/downstream/modules/builder/proc-customize-ee-image.adoc
@@ -40,7 +40,7 @@ collections:
 version: 3
 
 images:
-  base_image: 'registry.redhat.io/ansible-automation-platform-24/ee-minimal-rhel9:latest'
+  base_image: 'registry.redhat.io/ansible-automation-platform-25/ee-minimal-rhel9:latest'
 
 dependencies:
   galaxy:
@@ -91,7 +91,7 @@ $ podman tag [username]/new-ee [automation-hub-IP-address]/[username]/new-ee
 +
 [NOTE]
 =====
-You must have `admin` or appropriate container repository permissions for {HubName} to push a container. For more information, see the _Manage containers in {PrivateHubName}_ section in link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/managing_content_in_automation_hub/index#managing-containers-hub[Managing content in automation hub].
+You must have `admin` or appropriate container repository permissions for {HubName} to push a container. For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/managing_automation_content/index#managing-containers-hub[Manage containers in private automation hub].
 =====
 +
-----
diff --git a/downstream/modules/builder/proc-executing-build.adoc b/downstream/modules/builder/proc-executing-build.adoc
index d4148d6abf..a0bd4fe701 100644
--- a/downstream/modules/builder/proc-executing-build.adoc
+++ b/downstream/modules/builder/proc-executing-build.adoc
@@ -4,6 +4,11 @@
 
 After you create a definition file, you can proceed to build an {ExecEnvNameSing} image.
 
+[NOTE]
+====
+The {ExecEnvShort} image that you build must support the architecture on which {PlatformNameShort} is deployed.
+====
+
 .Prerequisites
 
 * You have created a definition file.
diff --git a/downstream/modules/builder/proc-installing-builder.adoc b/downstream/modules/builder/proc-installing-builder.adoc
index 7715b5360e..e28aa7410d 100644
--- a/downstream/modules/builder/proc-installing-builder.adoc
+++ b/downstream/modules/builder/proc-installing-builder.adoc
@@ -4,12 +4,12 @@
 
 .Prerequisites
 
 * You have installed the Podman container runtime.
-* You have valid subscriptions attached on the host.
Doing so allows you to access the subscription-only resources needed to install `ansible-builder`, and ensures that the necessary repository for `ansible-builder` is automatically enabled. See link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/red_hat_ansible_automation_platform_planning_guide/index#proc-attaching-subscriptions_planning[Attaching your Red Hat {PlatformNameShort} subscription] for more information.
+* You have valid subscriptions attached on the host. Attached subscriptions allow you to access the subscription-only resources needed to install `ansible-builder`, and ensure that the necessary repository for `ansible-builder` is automatically enabled. See link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/access_management_and_authentication/index#proc-attaching-subscriptions[Attaching your Red Hat {PlatformNameShort} subscription] for more information.
 
 .Procedure
 
 * In your terminal, run the following command to install {Builder} and activate your {PlatformNameShort} repo:
 +
 ----
-# dnf install --enablerepo=ansible-automation-platform-2.4-for-rhel-9-x86_64-rpms ansible-builder
+# dnf install --enablerepo=ansible-automation-platform-2.5-for-rhel-9-x86_64-rpms ansible-builder
 ----
diff --git a/downstream/modules/builder/ref-scenario-using-authentication-ee.adoc b/downstream/modules/builder/ref-scenario-using-authentication-ee.adoc
index 979fea7571..09a0722fd6 100644
--- a/downstream/modules/builder/ref-scenario-using-authentication-ee.adoc
+++ b/downstream/modules/builder/ref-scenario-using-authentication-ee.adoc
@@ -8,7 +8,7 @@ Use the following example to customize the default definition file to pass {HubN
 
 .Prerequisites
 
-* You have link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_hub/hub-create-api-token[created an {HubName} API token] and stored it in a secure location, for example in a file named `token.txt`.
+* You have created an API token, as described in link:{URLHubManagingContent}/managing-cert-valid-content#proc-create-api-token[Retrieving the API token for your Red Hat Certified Collection], and stored it in a secure location, for example in a file named `token.txt`.
 * Define a build argument that gets populated with the {HubName} API token:
 ----
 export ANSIBLE_GALAXY_SERVER_AUTOMATION_HUB_TOKEN=$(cat token.txt)
diff --git a/downstream/modules/dev-guide/proc-select-custom-venv-export.adoc b/downstream/modules/dev-guide/proc-select-custom-venv-export.adoc
index 2574e1f25a..cf20aac006 100644
--- a/downstream/modules/dev-guide/proc-select-custom-venv-export.adoc
+++ b/downstream/modules/dev-guide/proc-select-custom-venv-export.adoc
@@ -3,7 +3,7 @@
 
 = Selecting the custom virtual environment to export
 
 [role="_abstract"]
-Select the custom virtual environment you wish to export using `awx-manage export_custom_venv` command.
+Select the custom virtual environment you want to export by using the `awx-manage export_custom_venv` command.
.Procedure
diff --git a/downstream/modules/devtools/con-devtools-plan-roles-collection.adoc b/downstream/modules/devtools/con-devtools-plan-roles-collection.adoc
new file mode 100644
index 0000000000..c6a66a9a2d
--- /dev/null
+++ b/downstream/modules/devtools/con-devtools-plan-roles-collection.adoc
@@ -0,0 +1,10 @@
+:_mod-docs-content-type: CONCEPT
+
+[id="plan-roles-collection_{context}"]
+= Planning your collection
+
+Organize smaller bundles of curated automation into separate collections for specific functions, rather than creating one big general collection for all of your roles.
+
+For example, you could store roles that manage the networking for an internal system called `myapp` in a `company_namespace.myapp_network` collection,
+and store roles that manage and deploy networking in AWS in a collection called `company_namespace.aws_net`.
+
diff --git a/downstream/modules/devtools/con-devtools-requirements.adoc b/downstream/modules/devtools/con-devtools-requirements.adoc
new file mode 100644
index 0000000000..ecb53d728a
--- /dev/null
+++ b/downstream/modules/devtools/con-devtools-requirements.adoc
@@ -0,0 +1,24 @@
+[id="devtools-requirements_{context}"]
+
+= Requirements
+
+[role="_abstract"]
+To install and use {ToolsName}, you must meet the following requirements.
+Extra requirements for Windows installations and containerized installations are indicated in the procedures.
+
+* Python 3.10 or later.
+* {VSCode} (Visual Studio Code) with the Ansible extension added. See
+xref:devtools-install-vsc_installing-devtools[Installing {VSCode}].
+* For containerized installations, the Microsoft Dev Containers {VSCode} extension. See
+xref:devtools-ms-dev-containers-ext_installing-devtools[Installing and configuring the Dev Containers extension].
+* A containerization platform, for example Podman, Podman Desktop, Docker, or Docker Desktop.
++
+[NOTE]
+====
+The installation procedure for {ToolsName} on Windows covers the use of Podman Desktop only.
+See xref:devtools-install-podman-desktop-wsl_installing-devtools#installing_podman_desktop_on_a_windows_machine[Installing Podman Desktop on a Windows machine].
+====
+* You have a Red Hat account and you can log in to the Red Hat container registry at `registry.redhat.io`.
+For information about logging in to `registry.redhat.io`, see
+xref:devtools-setup-registry-redhat-io_installing-devtools[Authenticating with the Red Hat container registry].
+
diff --git a/downstream/modules/devtools/con-devtools-roles-collection-prerequisites.adoc b/downstream/modules/devtools/con-devtools-roles-collection-prerequisites.adoc
new file mode 100644
index 0000000000..2b30223c52
--- /dev/null
+++ b/downstream/modules/devtools/con-devtools-roles-collection-prerequisites.adoc
@@ -0,0 +1,15 @@
+:_mod-docs-content-type: CONCEPT
+
+[id="devtools-roles-collection-prerequisites_{context}"]
+= Prerequisites
+
+* You have installed {VSCode} and the Ansible extension.
+* You have installed the Microsoft Dev Containers extension in {VSCode}.
+* You have installed {ToolsName}.
+* You have installed a containerization platform, for example Podman, Podman Desktop, Docker, or Docker Desktop.
+* You have a Red Hat account and you can log in to the Red Hat container registry at `registry.redhat.io`.
+For information about logging in to `registry.redhat.io`, see
+xref:devtools-setup-registry-redhat-io_installing-devtools[Authenticating with the Red Hat container registry].
+// * Considerations about environments / isolation (ADE / devcontainer files) + + diff --git a/downstream/modules/devtools/con-rhdh-install-ocp-prereqs.adoc b/downstream/modules/devtools/con-rhdh-install-ocp-prereqs.adoc new file mode 100644 index 0000000000..c76ec76b52 --- /dev/null +++ b/downstream/modules/devtools/con-rhdh-install-ocp-prereqs.adoc @@ -0,0 +1,17 @@ +:_mod-docs-content-type: CONCEPT + +[id="rhdh-install-ocp-prereqs_{context}"] += Prerequisites + +* {RHDH} installed on {OCP}. +** For Helm installation, follow the steps in the +https://docs.redhat.com/en/documentation/red_hat_developer_hub/{RHDHVers}/html/installing_red_hat_developer_hub_on_openshift_container_platform/index#assembly-install-rhdh-ocp-helm[Installing Red Hat Developer Hub on OpenShift Container Platform with the Helm chart] +section of _Installing Red Hat Developer Hub on OpenShift Container Platform_. +** For Operator installation, follow the steps in the +https://docs.redhat.com/en/documentation/red_hat_developer_hub/{RHDHVers}/html/installing_red_hat_developer_hub_on_openshift_container_platform/index#assembly-install-rhdh-ocp-operator[Installing Red Hat Developer Hub on OpenShift Container Platform with the Operator] +section of _Installing Red Hat Developer Hub on OpenShift Container Platform_. +* A valid subscription to {PlatformName}. +* An {OCPShort} instance with the appropriate permissions within your project to create an application. +* The {RHDH} instance can query the automation controller API. +* Optional: To use the integrated learning paths, you must have outbound access to developers.redhat.com. + diff --git a/downstream/modules/devtools/con-rhdh-recommended-preconfig.adoc b/downstream/modules/devtools/con-rhdh-recommended-preconfig.adoc new file mode 100644 index 0000000000..59c88dd504 --- /dev/null +++ b/downstream/modules/devtools/con-rhdh-recommended-preconfig.adoc @@ -0,0 +1,17 @@ +:_mod-docs-content-type: CONCEPT + +[id="rhdh-recommended-preconfig_{context}"] += Recommended {RHDHShort} preconfiguration + +Red Hat recommends performing the following initial configuration tasks in {RHDHShort}. +However, you can install the {AAPRHDH} before completing these tasks. + +* link:{BaseURL}/red_hat_developer_hub/{RHDHVers}/html/authentication/index[Setting up authentication in {RHDHShort}] +* link:{BaseURL}/red_hat_developer_hub/{RHDHVers}/html/authorization/index[Installing and configuring RBAC in {RHDHShort}] + +[NOTE] +==== +Red Hat provides a link:https://github.com/ansible/ansible-rhdh-templates/blob/main/all.yaml[repository of software templates for {RHDHShort}] that uses the `publish:github` action. +To use these software templates, you must install the required GitHub dynamic plugins. +==== + diff --git a/downstream/modules/devtools/proc-configure-extension-settings.adoc b/downstream/modules/devtools/proc-configure-extension-settings.adoc deleted file mode 100644 index 7cb5f8d153..0000000000 --- a/downstream/modules/devtools/proc-configure-extension-settings.adoc +++ /dev/null @@ -1,35 +0,0 @@ -[id="configure-extension-settings"] - -= Configuring Ansible extension settings - -[role="_abstract"] - -The Ansible {VSCode} extension supports multiple configuration options. -You can configure the settings for the extension on a user level, on a workspace level, or for a particular directory. -Workspace settings are stored within your workspace and only apply when the current workspace is opened. 
- -It is useful to configure settings at the workspace level for the following reasons: - -* If you define and maintain configurations specific to your playbook project, you can customize your Ansible development environment for individual projects without altering your preferred setup for other work. -* You can have different settings for a Python project, an Ansible project, and a C++ project, each optimized for the respective stack without the need to manually reconfigure settings each time you switch projects. -* If you include workspace settings when setting up version control for a project you want to share with your team, everyone uses the same configuration for that project. - -.Procedure - -. To open the Ansible extension settings, click the *Extensions* icon in the activity bar. -. Select the Ansible extension, and click the *Manage* icon ({SettingsIcon}) and then btn:[Extension Settings] to display the extension settings. -+ -Alternatively, select menu:Code[Settings > Settings] to open the *Settings* page. -Enter `Ansible` in the search bar to display the extension settings. -. Select the *Workspace* tab to configure your settings for the current {VSCode} workspace. - -The Ansible extension settings are pre-populated. - -* Check the *Ansible > Validation > Lint: Enabled* box to enable ansible-lint. -* Check the *Ansible Execution Environment: Enabled* box to use an execution environment. -* Specify the execution environment image you want to use in the *Ansible > Execution Environment: image* field. -* To use Ansible Lightspeed, check the *Ansible > Lightspeed: Enabled* box, and enter the URL for Lightspeed. - -// The settings are documented on the link:https://marketplace.visualstudio.com/items?itemName=redhat.ansible[Ansible VS Code Extension by Red Hat page] in the VisualStudio marketplace documentation. - - diff --git a/downstream/modules/devtools/proc-create-molecule-scenario.adoc b/downstream/modules/devtools/proc-create-molecule-scenario.adoc index ca32ca2e62..30243c5b2b 100644 --- a/downstream/modules/devtools/proc-create-molecule-scenario.adoc +++ b/downstream/modules/devtools/proc-create-molecule-scenario.adoc @@ -1,4 +1,4 @@ -[id="create-molcule-scenario"] +[id="create-molcule-scenario_{context}"] = Creating a molecule scenario diff --git a/downstream/modules/devtools/proc-create-python-venv.adoc b/downstream/modules/devtools/proc-create-python-venv.adoc index 7198090501..1b5a1759c6 100644 --- a/downstream/modules/devtools/proc-create-python-venv.adoc +++ b/downstream/modules/devtools/proc-create-python-venv.adoc @@ -1,4 +1,4 @@ -[id="create-python-venv"] +[id="create-python-venv_{context}"] = Creating a Python virtual environment diff --git a/downstream/modules/devtools/proc-debugging-playbook.adoc b/downstream/modules/devtools/proc-debugging-playbook.adoc index 1a4fa2d9c3..3d43e9e28e 100644 --- a/downstream/modules/devtools/proc-debugging-playbook.adoc +++ b/downstream/modules/devtools/proc-debugging-playbook.adoc @@ -1,26 +1,7 @@ -[id="debugging-playbook"] +[id="debugging-playbook_{context}"] = Debugging your playbook -[role="_abstract"] -The Ansible extension provides syntax highlighting and assists you with indentation in `.yml` files. - -The following rules apply for playbook files: - -* Every playbook file must finish with a blank line. -* Trailing spaces at the end of lines are not allowed. -* Every playbook and every play require an identifier (name). 
-
-== Inline help
-
-* If you hover your mouse over a keyword or a module name, the Ansible extension provides documentation:
-+
-image::ansible-lint-keyword-help.png[Ansible-lint showing no errors in a playbook]
-* If you begin to type the name of a module, for example `ansible.builtin.ping`, the extension provides a list of suggestions.
-Select one of the suggestions to autocomplete the line.
-+
-image::ansible-lint-module-completion.png[Ansible-lint showing no errors in a playbook]
-
 == Error messages
 
 The following playbook contains multiple errors:
@@ -39,7 +20,7 @@ Hover your mouse over an error to view the details:
 
 image::ansible-lint-errors.png[Popup message explaining a playbook error]
 
 The errors are listed in the *Problems* tab of the {VSCode} terminal.
-Playbook files that contain errors are indicated with a number in the Explorer pane:
+Playbook files that contain errors are indicated with a number in the *Explorer* pane:
 
 image::ansible-lint-errors-explorer.png[Playbook errors shown in Problems tab and explorer list]
diff --git a/downstream/modules/devtools/proc-devtools-create-aap-job.adoc b/downstream/modules/devtools/proc-devtools-create-aap-job.adoc
new file mode 100644
index 0000000000..483d57b773
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-create-aap-job.adoc
@@ -0,0 +1,24 @@
+[id="create-aap-job_{context}"]
+
+= Running your playbook in {PlatformNameShort}
+
+To run your playbook in {PlatformNameShort}, you must create a project in {ControllerName} for the repository where you stored your playbook project.
+You can then create a job template for each playbook from the project.
+
+.Procedure
+
+. In a browser, log in to {ControllerName}.
+. Configure a Source Control credential type for your source control system if necessary. See the
+link:{URLControllerUserGuide}/controller-credentials#controller-create-credential[Creating new credentials]
+section of _{TitleControllerUserGuide}_ for more details.
+. In {ControllerName}, create a project for the GitHub repository where you stored your playbook project. Refer to the
+link:{URLControllerUserGuide}/controller-projects[Projects]
+chapter of _{TitleControllerUserGuide}_.
+. Create a job template that uses a playbook from the project that you created. Refer to the
+link:{URLControllerUserGuide}/controller-job-templates[Job Templates]
+chapter of _{TitleControllerUserGuide}_.
+. Run your playbook from {ControllerName} by launching the job template. Refer to the
+link:{URLControllerUserGuide}/controller-job-templates#controller-launch-job-template[Launching a job template]
+section of _{TitleControllerUserGuide}_.
+
diff --git a/downstream/modules/devtools/proc-devtools-create-new-role-in-collection.adoc b/downstream/modules/devtools/proc-devtools-create-new-role-in-collection.adoc
new file mode 100644
index 0000000000..3fe714a063
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-create-new-role-in-collection.adoc
@@ -0,0 +1,14 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="devtools-create-new-role-in-collection_{context}"]
+= Creating a new role in your collection
+
+.Procedure
+
+. To create a new role, copy the default `run` role directory that was scaffolded when you created the collection.
+. Define the tasks that you want your role to perform in the `tasks/main.yml` file.
+If you are creating a role to reuse tasks in an existing playbook,
+copy the content in the tasks block of your playbook YAML file.
+Remove the whitespace to the left of the tasks.
+Use `ansible-lint` in {VSCode} to check your YAML code.
+. If your role depends on another role, add the dependency in the `meta/main.yml` file.
diff --git a/downstream/modules/devtools/proc-devtools-docs-roles-collection.adoc b/downstream/modules/devtools/proc-devtools-docs-roles-collection.adoc
new file mode 100644
index 0000000000..9491d414cc
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-docs-roles-collection.adoc
@@ -0,0 +1,37 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="devtools-docs-roles-collection_{context}"]
+= Adding documentation for your roles collection
+
+It is important to provide documentation for your roles so that other users understand what your roles do and how to use them.
+
+== Documenting your roles
+
+When you scaffolded your collection directory, a `README.md` file was added in the role directory.
+Add your documentation for your role in this file.
+Provide the following information in the `README.md` files for every role in your collection:
+
+* Role description: A brief summary of what the role does
+* Requirements: List the collections, libraries, and required installations
+* Dependencies
+* Role variables: Provide the following information about the variables your role uses.
+** Description
+** Defaults
+** Example values
+** Required variables
+* Example playbook: Show an example of a playbook that uses your role.
+Use comments in the playbook to help users understand where to set variables.
+
+The `README.md` file in link:https://github.com/redhat-cop/controller_configuration/tree/devel/roles/ad_hoc_command_cancel[`controller_configuration.ad_hoc_command_cancel`] is an example of a role with standard documentation.
+
+== Documenting your collection
+
+In the `README.md` file for your collection, provide the following information:
+
+* Collection description: Describe what the collection does.
+* Requirements: List required collections.
+* List the roles as a component of the collection.
+* Using the collection: Describe how to run the components of the collection.
+* Add a troubleshooting section.
+* Versioning: Describe the release cycle of your collection.
+
diff --git a/downstream/modules/devtools/proc-devtools-extension-run-ansible-navigator.adoc b/downstream/modules/devtools/proc-devtools-extension-run-ansible-navigator.adoc
new file mode 100644
index 0000000000..f5fc5f916b
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-extension-run-ansible-navigator.adoc
@@ -0,0 +1,29 @@
+[id="extension-run-ansible-navigator_{context}"]
+
+= Running your playbook with `ansible-navigator`
+
+.Prerequisites
+
+* In the Ansible extension settings, enable the use of an execution environment in *Ansible Execution Environment > Enabled*.
+* Enter the path or URL for the execution environment image in *Ansible > Execution Environment: Image*.
+
+.Procedure
+
+. To run a playbook, right-click the playbook name in the *Explorer* pane, then select menu:Run Ansible Playbook via[Run playbook via ansible-navigator run].
++
+The output is displayed in the *Terminal* tab of the {VSCode} terminal.
+The *Successful* status indicates that the playbook ran successfully.
++
+image:devtools-extension-navigator-output.png[Output for ansible-navigator execution]
+. Enter the number next to a play to step into the play results.
+The example playbook only contains one play.
+Enter `0` to view the status of the tasks executed in the play.
++
+image:devtools-extension-navigator-tasks.png[Tasks in ansible-navigator output]
++
+Enter the number next to a task to review the task results.
+
+For more information on running playbooks with {Navigator}, see
+link:{URLNavigatorGuide}/assembly-execute-playbooks-navigator_ansible-navigator#proc-execute-playbook-tui_execute-playbooks-navigator[Executing a playbook from automation content navigator]
+in the _{TitleNavigatorGuide}_ guide.
+
diff --git a/downstream/modules/devtools/proc-devtools-extension-run-ansible-playbook.adoc b/downstream/modules/devtools/proc-devtools-extension-run-ansible-playbook.adoc
new file mode 100644
index 0000000000..ed06f34572
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-extension-run-ansible-playbook.adoc
@@ -0,0 +1,15 @@
+[id="extension-run-ansible-playbook_{context}"]
+
+= Running your playbook with `ansible-playbook`
+
+.Procedure
+
+* To run a playbook, right-click the playbook name in the *Explorer* pane, then select menu:Run Ansible Playbook via[Run playbook via `ansible-playbook`].
++
+image:ansible-playbook-run.png[Run playbook via ansible-playbook]
+
+The output is displayed in the *Terminal* tab of the {VSCode} terminal.
+The `ok=2` and `failed=0` messages indicate that the playbook ran successfully.
+
+image:ansible-playbook-success.png[Success message for ansible-playbook execution]
+
diff --git a/downstream/modules/devtools/proc-devtools-extension-set-language.adoc b/downstream/modules/devtools/proc-devtools-extension-set-language.adoc
new file mode 100644
index 0000000000..cbd8bcddc7
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-extension-set-language.adoc
@@ -0,0 +1,43 @@
+[id="devtools-extension-set-language_{context}"]
+
+= Associating the Ansible language to YAML files
+
+[role="_abstract"]
+
+The Ansible {VSCode} extension works only when the language associated with a file is set to Ansible.
+The extension provides features that help create Ansible playbooks, such as auto-completion, hover, and diagnostics.
+
+The Ansible {VSCode} extension automatically associates the Ansible language with some files.
+The following procedures describe how to set the language for files that are not recognized as Ansible files.
+
+.Manually associating the Ansible language to YAML files
+
+The following procedure describes how to manually assign the Ansible language to a YAML file that is open in {VSCode}.
+
+. Open or create a YAML file in {VSCode}.
+. Hover the cursor over the language identified in the status bar at the bottom of the {VSCode} window to open the *Select Language Mode* list.
+. Select *Ansible* in the list.
++
+The language shown in the status bar at the bottom of the {VSCode} window for the file is changed to Ansible.
+
+.Adding persistent file association for the Ansible language to `settings.json`
+
+Alternatively, you can add file association for the Ansible language in your `settings.json` file.
+
+. Open the `settings.json` file:
+.. Click menu:View[Command Palette] to open the command palette.
+.. Enter `Workspace settings` in the search box and select *Open Workspace Settings (JSON)*.
+. Add the following code to `settings.json`.
++
+----
+{
+    ...
+
+    "files.associations": {
+        "*plays.yml": "ansible",
+        "*init.yml": "yaml",
+    }
+}
+----
+
+
diff --git a/downstream/modules/devtools/proc-devtools-extension-settings.adoc b/downstream/modules/devtools/proc-devtools-extension-settings.adoc
new file mode 100644
index 0000000000..ec157d0953
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-extension-settings.adoc
@@ -0,0 +1,37 @@
+[id="devtools-extension-settings_{context}"]
+
+= Configuring Ansible extension settings
+
+[role="_abstract"]
+
+The Ansible extension supports multiple configuration options.
+
+You can configure the settings for the extension on a user level, on a workspace level, or for a particular directory.
+User-based settings are applied globally for any instance of {VSCode} that is opened.
+Workspace settings are stored within your workspace and only apply when the current workspace is opened.
+
+It is useful to configure settings for your workspace for the following reasons:
+
+* If you define and maintain configurations specific to your playbook project,
+you can customize your Ansible development environment for individual projects without altering your preferred setup for other work.
+You can have different settings for a Python project, an Ansible project, and a C++ project, each optimized for the respective stack without the need to manually reconfigure settings each time you switch projects.
+* If you include workspace settings when setting up version control for a project you want to share with your team, everyone uses the same configuration for that project.
+
+.Procedure
+
+. Open the Ansible extension settings:
+.. Click the *Extensions* icon in the activity bar.
+.. Select the Ansible extension, and click the gear icon and then *Extension Settings* to display the extension settings.
++
+Alternatively, click menu:Code[Settings > Settings] to open the *Settings* page.
+.. Enter `Ansible` in the search bar to display the settings for the extension.
+. Select the *Workspace* tab to configure your settings for the current {VSCode} workspace.
+. The Ansible extension settings are pre-populated.
+Modify the settings to suit your requirements:
+** Check the *Ansible > Validation > Lint: Enabled* box to enable ansible-lint.
+** Check the *Ansible Execution Environment: Enabled* box to use an {ExecEnvShort}.
+** Specify the {ExecEnvShort} image you want to use in the *Ansible > Execution Environment: image* field.
+** To use {LightspeedShortName}, check the *Ansible > Lightspeed: Enabled* box, and enter the URL for Lightspeed.
+
+The settings are documented on the link:https://marketplace.visualstudio.com/items?itemName=redhat.ansible[Ansible {VSCode} Extension by Red Hat page] in the Visual Studio Marketplace documentation.
+
diff --git a/downstream/modules/devtools/proc-devtools-inspect-playbook.adoc b/downstream/modules/devtools/proc-devtools-inspect-playbook.adoc
new file mode 100644
index 0000000000..e030590516
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-inspect-playbook.adoc
@@ -0,0 +1,26 @@
+[id="inspect-playbook_{context}"]
+
+= Inspecting your playbook
+
+[role="_abstract"]
+The Ansible {VSCode} extension provides syntax highlighting and assists you with indentation in `.yml` files.
+
+The following rules apply for playbook files:
+
+* Every playbook file must finish with a blank line.
+* Trailing spaces at the end of lines are not allowed.
+* Every playbook and every play requires an identifier (name).
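+
+For example, the following minimal playbook satisfies these rules; the play and task names are only illustrative:
+
+----
+---
+- name: Example play with a name and no trailing whitespace
+  hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: Named task that pings the control node
+      ansible.builtin.ping:
+----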
+ +== Inline help + +The Ansible extension also provides inline help when you are editing your playbook file. + +* If you hover your mouse over a keyword or a module name, the Ansible extension provides documentation: ++ +image::ansible-lint-keyword-help.png[Ansible-lint showing no errors in a playbook] +* If you begin to type the name of a module, for example `ansible.builtin.ping`, the extension provides a list of suggestions. ++ +Select one of the suggestions to autocomplete the line. ++ +image::ansible-lint-module-completion.png[Ansible-lint showing no errors in a playbook] + diff --git a/downstream/modules/devtools/proc-devtools-install-container.adoc b/downstream/modules/devtools/proc-devtools-install-container.adoc new file mode 100644 index 0000000000..956ed2ea00 --- /dev/null +++ b/downstream/modules/devtools/proc-devtools-install-container.adoc @@ -0,0 +1,58 @@ +[id="devtools-install-container_{context}"] + += Installing {ToolsName} on a container inside {VSCode} + +The Dev Containers {VSCode} extension requires a `.devcontainer` file to store settings for your dev containers. +You must create a config file for your dev container and reopen your directory in a container in {VSCode}. + +.Prerequisites + +* You have installed a containerization platform, for example Podman, Podman Desktop, Docker, or Docker Desktop. +* You have a Red Hat login and you have logged in to the Red Hat registry at `registry.redhat.io`. +For information about logging in to `registry.redhat.io`, see +xref:devtools-setup-registry-redhat-io_installing-devtools[Authenticating with the Red Hat container registry]. +* You have installed {VSCode}. +* You have installed the Ansible extension in {VSCode}. +* You have installed the link:https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers[Microsoft Dev Containers] extension in {VSCode}. +* If you are installing {ToolsName} on Windows, launch {VSCode} and connect to the WSL machine: +.. Click the `Remote` (image:vscode-remote-icon.png[Remote,15,15]) icon. +.. In the dropdown menu that appears, select the option to connect to the WSL machine. + +.Procedure + +. In {VSCode}, open the directory where you want to store the configuration files for your development container. +. Create a subdirectory called `.devcontainer`. +. In the `.devcontainer` directory, create a file called `devcontainer.json`. ++ +You must use different settings depending on whether you are using Podman or Docker. + +** If you are using Podman or Podman desktop, add the following text to `devcontainer.json`: ++ +include::snippets/podman-devcontainer.json[] +** If you are using Docker or Docker desktop, add the following text to `devcontainer.json`: ++ +include::snippets/docker-devcontainer.json[] +. Reopen the directory in a container. +** If {VSCode} detects that your directory contains the `devcontainer.json` file, the following notification appears: ++ +image::devtools-reopen-in-container.png[Reopen in container] ++ +Click *Reopen in Container*. +** If the notification does not appear, click the `Remote` (image:vscode-remote-icon.png[Remote,15,15]) icon. In the dropdown menu that appears, select *Reopen in Container*. + +The *Remote ()* status in the {VSCode} Status bar displays `opening Remote` and a notification indicates the progress in opening the container. + +.Verification +When the directory reopens in a container, the *Remote ()* status displays `Dev Container: ansible-dev-container`. 
+
+
+[NOTE]
+====
+The base image for the container is a Universal Base Image Minimal (UBI Minimal) image that uses `microdnf` as a package manager.
+The `dnf` and `yum` package managers are not available in the container.
+
+For information about using `microdnf` in containers based on UBI Minimal images, see
+link:https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/9/html/building_running_and_managing_containers/assembly_adding-software-to-a-ubi-container_building-running-and-managing-containers#proc_adding-software-in-a-minimal-ubi-container_assembly_adding-software-to-a-ubi-container[Adding software in a minimal UBI container]
+in the Red Hat Enterprise Linux _Building, running, and managing containers_ guide.
+====
+
diff --git a/downstream/modules/devtools/proc-devtools-install-podman-desktop-wsl.adoc b/downstream/modules/devtools/proc-devtools-install-podman-desktop-wsl.adoc
new file mode 100644
index 0000000000..0348f11bab
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-install-podman-desktop-wsl.adoc
@@ -0,0 +1,101 @@
+[id="devtools-install-podman-desktop-wsl_{context}"]
+
+= Requirements for {ToolsName} on Windows
+
+[role="_abstract"]
+If you are installing {ToolsName} on a container in {VSCode} on Windows, there are extra requirements:
+
+* Windows Subsystem for Linux (WSL2)
+* Podman Desktop
+
+== Installing WSL
+
+. Install WSL2 without a distribution:
++
+----
+$ wsl --install --no-distribution
+----
+. Use `cgroupsv2` by disabling `cgroupsv1` for WSL2:
++
+Edit the `%USERPROFILE%/.wslconfig` file and add the following lines to force `cgroupsv2` usage:
++
+----
+[wsl2]
+kernelCommandLine = cgroup_no_v1="all"
+----
+
+== Installing Podman Desktop on a Windows machine
+
+. Install Podman Desktop. Follow the instructions in
+link:https://podman-desktop.io/docs/installation/windows-install[Installing Podman Desktop and Podman on Windows]
+in the Podman Desktop documentation.
++
+You do not need to change the default settings in the set-up wizard.
+. Ensure the Podman machine is using `cgroupsv2`:
++
+----
+$ podman info | findstr cgroup
+----
+. Test Podman Desktop:
++
+----
+$ podman run hello
+----
+
+== Configuring settings for Podman Desktop
+
+. Add a `%USERPROFILE%\bin\docker.bat` file with the following content:
++
+----
+@echo off
+podman %*
+----
++
+This avoids having to install Docker as required by the {VSCode} `Dev Containers` extension.
+. Add the `%USERPROFILE%\bin` directory to the `PATH`:
+.. Select *Settings* and search for "Edit environment variables for your account" to display all of the user environment variables.
+.. Highlight "Path" in the top user variables box, click btn:[Edit] and add the path.
+.. Click btn:[Save] to set the path for any new console that you open.
+
+
+// https://podman-desktop.io/docs/installation/windows-install
+
+// Moved to general requirements section
+// == Configuring the `Dev Containers` extension
+//
+// . Replace docker with podman in the `Dev Containers` extension settings:
+// .. In {VSCode}, open the settings editor.
+// .. Search for `@ext:ms-vscode-remote.remote-containers`.
+// +
+// Alternatively, click the *Extensions* icon in the activity bar and click the gear icon for the `Dev Containers` extension.
+// . Set `Dev > Containers:Docker Path` to `podman`.
+// . Set `Dev > Containers:Docker Compose Path` to `podman-compose`.
+
+// == Adding the .devcontainer file
+//
+// . Click the `Remote` (image:vscode-remote-icon.png[Remote,15,15]) icon.
// + In the dropdown menu that appears, select the option to connect to the WSL machine.
// . Open a terminal window in {VSCode}.
// . Create a .devcontainer directory:
// +
// ----
// $ mkdir .devcontainer
// ----
// . Create a `devcontainer.json` file:
// +
// ----
// $ touch .devcontainer/devcontainer.json
// ----
// . Add the following code to the `.devcontainer/devcontainer.json` file:
// +
// include::snippets/podman-devcontainer.json[]
// . Log in to the Red Hat registry:
// ----
// $ podman login registry.redhat.io
// ----
// . Click the `Remote` (image:vscode-remote-icon.png[Remote,15,15]) icon. In the dropdown menu that appears, select *Reopen in Container*.
//
// .Verification
//
// When the directory reopens in a container, the *Remote ()* status displays `Dev Container: ansible-dev-container`.
+
diff --git a/downstream/modules/devtools/proc-devtools-install.adoc b/downstream/modules/devtools/proc-devtools-install-rpm.adoc
similarity index 74%
rename from downstream/modules/devtools/proc-devtools-install.adoc
rename to downstream/modules/devtools/proc-devtools-install-rpm.adoc
index 5548275f93..fc3b4b5781 100644
--- a/downstream/modules/devtools/proc-devtools-install.adoc
+++ b/downstream/modules/devtools/proc-devtools-install-rpm.adoc
@@ -1,16 +1,17 @@
-[id="devtools-install_context"]
+[id="devtools-install_{context}"]
 
-= Installing {ToolsName} from an RPM package
+= Installing {ToolsName} from a package on RHEL
 
 [role="_abstract"]
 {ToolsName} is bundled in the {PlatformNameShort} RPM (Red Hat Package Manager) package.
-// As an {PlatformNameShort} administrator, you can install {ToolsName} when you are installing {PlatformNameShort}.
-Refer to the {PlatformNameShort} guide for more information on installing {PlatformNameShort}.
+Refer to the _{LinkInstallationGuide}_ documentation for information on installing {PlatformNameShort}.
 
 .Prerequisites
-* You have installed RHEL
+
+* You have installed RHEL.
 * You have registered your system with Red Hat Subscription Manager.
+* You have installed a containerization platform, for example Podman or Docker.
 
 .Procedure
@@ -73,15 +74,16 @@ On successful installation, you can view the help documentation for ansible-crea
 
 ----
 $ ansible-creator --help
-usage: ansible-creator [-h] [--version] {init} ...
+usage: ansible-creator [-h] [--version] command ...
 
-Tool to scaffold Ansible Content. Get started by looking at the help text.
+The fastest way to generate all your ansible content.
 
-options:
-  -h, --help  show this help message and exit
-  --version   Print ansible-creator version and exit.
+Positional arguments:
+ command
+  add   Add resources to an existing Ansible project.
+  init  Initialize a new Ansible project.
 
-Commands:
-  {init}  The subcommand to invoke.
-    init  Initialize an Ansible Collection.
+Options:
+ --version  Print ansible-creator version and exit.
+ -h --help  Show this help message and exit
 ----
diff --git a/downstream/modules/devtools/proc-devtools-install-vsc.adoc b/downstream/modules/devtools/proc-devtools-install-vsc.adoc
new file mode 100644
index 0000000000..f0f5c816be
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-install-vsc.adoc
@@ -0,0 +1,8 @@
+[id="devtools-install-vsc_{context}"]
+
+= Installing {VSCode}
+
+[role="_abstract"]
+
+* To install {VSCode}, follow the instructions on the link:https://code.visualstudio.com/download[Download Visual Studio Code page] in the Visual Studio Code documentation.
+
diff --git a/downstream/modules/devtools/proc-devtools-install-vscode-extension.adoc b/downstream/modules/devtools/proc-devtools-install-vscode-extension.adoc
new file mode 100644
index 0000000000..59373abf84
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-install-vscode-extension.adoc
@@ -0,0 +1,33 @@
+[id="devtools-install-extension_{context}"]
+
+= Installing the {VSCode} Ansible extension
+
+[role="_abstract"]
+
+The Ansible extension adds language support for Ansible to {VSCode}.
+It incorporates {ToolsName} to facilitate creating and running automation content.
+
+For a full description of the Ansible extension, see the link:https://marketplace.visualstudio.com/items?itemName=redhat.ansible[Visual Studio Code Marketplace].
+
+See link:https://red.ht/aap-lp-vscode-essentials[Learning path - Getting Started with the Ansible {VSCode} Extension] for tutorials on working with the extension.
+
+To install the Ansible {VSCode} extension:
+
+. Open {VSCode}.
+. Click the *Extensions* (image:vscode-extensions-icon.png[Extensions,15,15]) icon in the Activity Bar, or click menu:View[Extensions], to display the *Extensions* view.
+. In the search field in the *Extensions* view, type `Ansible Red Hat`.
+. Select the Ansible extension and click btn:[Install].
+
+When the language for a file is recognized as Ansible, the Ansible extension provides features such as auto-completion, hover, diagnostics, and goto.
+The language identified for a file is displayed in the Status bar at the bottom of the {VSCode} window.
+
+The following files are assigned the Ansible language:
+
+* YAML files in a `/playbooks` directory
+* Files with one of the following double extensions: `.ansible.yml` or `.ansible.yaml`
+* Certain YAML names recognized by Ansible, for example `site.yml` or `site.yaml`
+* YAML files whose filename contains "playbook": `*playbook*.yml` or `*playbook*.yaml`
+
+If the extension does not identify the language for your playbook files as Ansible, follow the procedure in
+xref:devtools-extension-set-language_installing-devtools[Associating the Ansible language to YAML files].
+
diff --git a/downstream/modules/devtools/proc-devtools-migrate-existing-roles-collection.adoc b/downstream/modules/devtools/proc-devtools-migrate-existing-roles-collection.adoc
new file mode 100644
index 0000000000..89d4cb6878
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-migrate-existing-roles-collection.adoc
@@ -0,0 +1,89 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="devtools-migrate-existing-roles-collection_{context}"]
+= Migrating existing roles to your collection
+
+The directory for a standalone role has the following structure.
+Your role might not contain all of these directories.
+
+----
+my_role
+├── README.md
+├── defaults
+│   └── main.yml
+├── files
+├── handlers
+│   └── main.yml
+├── meta
+│   └── main.yml
+├── tasks
+│   └── main.yml
+├── templates
+├── tests
+│   ├── inventory
+│   └── test.yml
+└── vars
+    └── main.yml
+
+----
+
+An Ansible role has a defined directory structure with seven main standard directories.
+Each role must include at least one of these directories.
+You can omit any directories the role does not use.
+Each directory contains a `main.yml` file.
+
+.Procedure
+
+. If necessary, rename the directory that contains your role to reflect its content, for example, `acl_config` or `tacacs`.
++
+Roles in collections cannot have hyphens in their names. Use the underscore character (`_`) instead.
+. Copy the role directories from your standalone roles into the `roles/` directory in your collection.
++
+For example, in a collection called `myapp_network`, add your roles to the `myapp_network/roles/` directory.
+. Copy any plug-ins from your standalone roles into the `plugins/` directory for your new collection.
+The collection directory structure resembles the following.
++
+----
+company_namespace
+└── myapp_network
+    ├── ...
+    ├── galaxy.yml
+    ├── docs
+    ├── extensions
+    ├── meta
+    ├── plugins
+    ├── roles
+    │   ├── acl_config
+    │   │   ├── README.md
+    │   │   ├── defaults
+    │   │   ├── files
+    │   │   ├── handlers
+    │   │   ├── meta
+    │   │   ├── tasks
+    │   │   ├── templates
+    │   │   ├── tests
+    │   │   └── vars
+    │   ├── run
+    │   └── tacacs
+    │       ├── README.md
+    │       ├── defaults
+    │       ├── files
+    │       ├── handlers
+    │       ├── meta
+    │       ├── tasks
+    │       ├── templates
+    │       ├── tests
+    │       └── vars
+    ├── ...
+    ├── tests
+    └── vars
+
+----
++
+The `run` role is a default role directory that is created when you scaffold the collection.
+. Update your playbooks to use the fully qualified collection name (FQCN) for your new roles in your collection.
+
+Not every standalone role will seamlessly integrate into your collection without modification of the code.
+For example, if a third-party standalone role from Galaxy that contains a plug-in uses the `module_utils/` directory,
+then the plug-in itself has import statements that you must update to reference the new collection paths.
+
diff --git a/downstream/modules/devtools/proc-devtools-molecule-test-roles-collection.adoc b/downstream/modules/devtools/proc-devtools-molecule-test-roles-collection.adoc
new file mode 100644
index 0000000000..be8c54d156
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-molecule-test-roles-collection.adoc
@@ -0,0 +1,41 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="devtools-molecule-test-roles-collection_{context}"]
+= Using Molecule to test your roles
+
+It is useful to run your automation content in a test environment before using it to automate production infrastructure.
+Testing ensures the automation works as designed.
+
+Molecule is a complete testing framework designed to help you automate the testing of Ansible roles in different environments,
+ensuring that the roles behave as expected across various platforms and configurations.
+
+A Molecule scenario is a set of configurations and tests for roles within a collection.
+You can have as many scenarios as you like, and Molecule runs them one after the other.
+
+* `molecule.yml` is the central configuration entry point for Molecule per scenario.
+With this file, you can configure each tool that Molecule will employ when testing your role.
+* `create.yml` is a playbook file used for creating the instances and storing data in `instance-config`.
+* `converge.yml` is the playbook file that contains the call for your role.
+Molecule will invoke this playbook with `ansible-playbook` and run it against an instance created by the driver.
+* `destroy.yml` contains the Ansible code for destroying the instances and removing them from `instance-config`.
+
+. Navigate to the `extensions/` directory in your collection and initialize a new default molecule scenario:
++
+----
+molecule init scenario
+----
+. Edit the `converge.yml` playbook for the new scenario so that it calls your role. For example:
++
+----
+---
+- name: Include a role from a collection
+  hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: Testing role
+      ansible.builtin.include_role:
+        name: foo.bar.my_role
+        tasks_from: main.yml
+----
+
+// https://www.ansible.com/blog/developing-and-testing-ansible-roles-with-molecule-and-podman-part-1/
+// https://www.ansible.com/blog/developing-and-testing-ansible-roles-with-molecule-and-podman-part-2/
+
diff --git a/downstream/modules/devtools/proc-devtools-ms-dev-containers-ext.adoc b/downstream/modules/devtools/proc-devtools-ms-dev-containers-ext.adoc
new file mode 100644
index 0000000000..b3cacdfe7f
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-ms-dev-containers-ext.adoc
@@ -0,0 +1,23 @@
+[id="devtools-ms-dev-containers-ext_{context}"]
+
+= Installing and configuring the `Dev Containers` extension
+
+If you are installing the containerized version of {ToolsName}, you must install the
+link:https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers[Microsoft Dev Containers]
+extension in {VSCode}.
+
+. Open {VSCode}.
+. Click the *Extensions* (image:vscode-extensions-icon.png[Extensions,15,15]) icon in the Activity Bar, or click menu:View[Extensions], to display the *Extensions* view.
+. In the search field in the *Extensions* view, type `Dev Containers`.
+. Select the Dev Containers extension from Microsoft and click btn:[Install].
+
+If you are using Podman or Podman Desktop as your containerization platform, you must modify the default settings in the `Dev Containers` extension.
+
+. Replace docker with podman in the `Dev Containers` extension settings:
+.. In {VSCode}, open the settings editor.
+.. Search for `@ext:ms-vscode-remote.remote-containers`.
++
+Alternatively, click the *Extensions* icon in the activity bar and click the gear icon for the `Dev Containers` extension.
+. Set `Dev > Containers:Docker Path` to `podman`.
+. Set `Dev > Containers:Docker Compose Path` to `podman-compose`.
+
diff --git a/downstream/modules/devtools/proc-devtools-publish-roles-collection-pah.adoc b/downstream/modules/devtools/proc-devtools-publish-roles-collection-pah.adoc
new file mode 100644
index 0000000000..8a06fcb597
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-publish-roles-collection-pah.adoc
@@ -0,0 +1,29 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="devtools-publish-roles-collection-pah_{context}"]
+= Publishing your collection in {PrivateHubName}
+
+.Prerequisites
+
+* Package your collection into a tarball.
+Format your collection file name as follows:
+
+`<namespace>-<collection_name>-<version>.tar.gz`
+
+For example, `company_namespace-myapp_network-1.0.0.tar.gz`
+
+.Procedure
+
+. Create a namespace for your collection in {PrivateHubName}. See
+link:{URLHubManagingContent}/managing-collections-hub#proc-create-namespace[Creating a namespace]
+in the _{TitleHubManagingContent}_ guide.
+. Optional: Add information to your namespace. See
+link:{URLHubManagingContent}/managing-collections-hub#proc-edit-namespace[Adding additional information and resources to a namespace]
+in the _{TitleHubManagingContent}_ guide.
+. Upload your roles collection tarballs to your namespace. See
+link:{URLHubManagingContent}/managing-collections-hub#proc-uploading-collections[Uploading collections to your namespaces]
+in the _{TitleHubManagingContent}_ guide.
+. Approve your collection for internal publication.
See
+link:{URLHubManagingContent}/managing-collections-hub#proc-approve-collection[Approving collections for internal publication]
+in the _{TitleHubManagingContent}_ guide.
+
diff --git a/downstream/modules/devtools/proc-devtools-run-playbook-extension.adoc b/downstream/modules/devtools/proc-devtools-run-playbook-extension.adoc
new file mode 100644
index 0000000000..14b60a3d7b
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-run-playbook-extension.adoc
@@ -0,0 +1,12 @@
+[id="running-playbook-extension_{context}"]
+
+= Running your playbook
+
+[role="_abstract"]
+
+The Ansible {VSCode} extension provides two options to run your playbook:
+
+* `ansible-playbook` runs the playbook on your local machine using Ansible Core.
+* `ansible-navigator` runs the playbook in an execution environment in the same manner that {PlatformNameShort} runs an automation job.
+You specify the base image for the execution environment in the Ansible extension settings.
+
diff --git a/downstream/modules/devtools/proc-devtools-run-roles-collection.adoc b/downstream/modules/devtools/proc-devtools-run-roles-collection.adoc
new file mode 100644
index 0000000000..b72467f11d
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-run-roles-collection.adoc
@@ -0,0 +1,28 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="devtools-run-roles-collection_{context}"]
+= Running and testing your collection
+
+When you are developing your roles, you can use `ansible-lint` in the Ansible {VSCode}
+extension to display potential rule violations in the terminal in {VSCode}.
+
+When you package your collection and install it into your playbook projects,
+the {VSCode} extension autocompletion feature is available for your collection.
+// This helps you write functional playbooks.
+
+If you have your Ansible collections path set, you can use {NavigatorStart}
+to browse your collection and its contents.
+
+Use `ansible-navigator` to run your playbooks because it is useful for troubleshooting.
+You can explore the output at various depth levels.
+
+// == Using your collection in playbooks
+//
+// Collection path
+
+// == Navigator
+
+// Autocompletion in playbooks - connect scaffolding of both projects
+
diff --git a/downstream/modules/devtools/proc-devtools-save-scm.adoc b/downstream/modules/devtools/proc-devtools-save-scm.adoc
new file mode 100644
index 0000000000..77df62e906
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-save-scm.adoc
@@ -0,0 +1,11 @@
+[id="devtools-save-scm_{context}"]
+
+= Saving your project in SCM
+
+Save your playbook project as a repository in your source control management (SCM) system, for example GitHub.
+
+.Procedure
+
+. Initialize your project directory as a git repository.
+. Push your project up to a source control system such as GitHub.
+
diff --git a/downstream/modules/devtools/proc-devtools-scaffold-roles-collection.adoc b/downstream/modules/devtools/proc-devtools-scaffold-roles-collection.adoc
new file mode 100644
index 0000000000..211ff54afb
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-scaffold-roles-collection.adoc
@@ -0,0 +1,80 @@
+:_newdoc-version: 2.18.3
+:_template-generated: 2024-09-26
+:_mod-docs-content-type: PROCEDURE
+
+[id="devtools-scaffold-roles-collection_{context}"]
+= Scaffolding a collection for your roles
+
+You can scaffold a collection for your roles from the Ansible extension in {VSCode}.
+
+.Procedure
+
+. Open {VSCode}.
+. Navigate to the directory where you want to create your roles collection.
+. Select *Get started* in the *Ansible content creator* section.
++
+The *Ansible content creator* tab opens.
+. In the *Create* section, click *Ansible collection project*.
++
+The *Create new Ansible project* tab opens.
+. In the form in the *Create Ansible project* tab, enter the following:
+** *Namespace*: Enter a name for your namespace, for example `company_namespace`.
+** *Collection*: Enter a name for your collection, for example, `myapp_network`.
+** *Init path*: Enter the path to the directory where you want to scaffold your new collection.
++
+If you enter the name of an existing directory, the scaffolding process overwrites the contents of that directory.
+The scaffolding process allows you to use an existing directory only if you enable the *Force* option.
+
+*** If you are using the containerized version of {ToolsName},
+the destination directory path is relative to the container, not a path in your local system.
+To discover the current directory name in the container, run the `pwd` command in a terminal in {VSCode}.
+If the current directory in the container is `workspaces`, enter `workspaces/<destination_directory>/collections`.
+*** If you are using a locally installed version of {ToolsName},
+enter the full path to the directory, for example `/user/<username>/path/to/<destination_directory>`.
+. Click btn:[Create].
+
+.Verification
+
+The following message appears in the *Logs* pane of the *Create Ansible collection* tab.
+// In this example, the destination directory name is
+
+----
+--------------------- ansible-creator logs ---------------------
+
+ Note: collection company_namespace.myapp_network created at /path/to/collections/directory
+----
+
+The following directories and files are created in your `collections/` directory:
+
+----
+├── .devcontainer
+├── .github
+├── .gitignore
+├── .isort.cfg
+├── .pre-commit-config.yaml
+├── .prettierignore
+├── .vscode
+├── CHANGELOG.rst
+├── CODE_OF_CONDUCT.md
+├── CONTRIBUTING
+├── LICENSE
+├── MAINTAINERS
+├── README.md
+├── changelogs
+├── devfile.yaml
+├── docs
+├── extensions
+├── galaxy.yml
+├── meta
+├── plugins
+├── pyproject.toml
+├── requirements.txt
+├── roles
+├── test-requirements.txt
+├── tests
+└── tox-ansible.ini
+
+----
+
+
diff --git a/downstream/modules/devtools/proc-devtools-set-up-ansible-config.adoc b/downstream/modules/devtools/proc-devtools-set-up-ansible-config.adoc
new file mode 100644
index 0000000000..ac48abe4fa
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-set-up-ansible-config.adoc
@@ -0,0 +1,15 @@
+[id="devtools-set-up-ansible-config_{context}"]
+
+= Setting up the Ansible configuration file for your playbook project
+
+[role="_abstract"]
+When you scaffolded your playbook project, an Ansible configuration file, `ansible.cfg`,
+was added to the root directory of your project.
+
+If you have configured a default Ansible configuration file in `/etc/ansible/ansible.cfg`,
+copy any settings that you want to reuse in your project from your default Ansible configuration file
+to the `ansible.cfg` file in your project's root directory.
+
+To learn more about the Ansible configuration file, see
+link:https://docs.ansible.com/ansible/latest/reference_appendices/config.html[Ansible Configuration Settings]
+in the Ansible documentation.
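+
+For example, to carry an inventory location and a roles path over into the project, you could append the corresponding settings to the project's `ansible.cfg`. This is a minimal sketch; the `[defaults]` keys and values shown are illustrative examples, not required settings:
+
+----
+$ cat >> ansible.cfg <<'EOF'
+[defaults]
+# Illustrative settings carried over from /etc/ansible/ansible.cfg
+inventory = ./inventory.ini
+roles_path = ./roles
+EOF
+----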
diff --git a/downstream/modules/devtools/proc-devtools-setup-registry-redhat-io.adoc b/downstream/modules/devtools/proc-devtools-setup-registry-redhat-io.adoc
new file mode 100644
index 0000000000..91fe78efa3
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-setup-registry-redhat-io.adoc
@@ -0,0 +1,49 @@
+[id="devtools-setup-registry-redhat-io_{context}"]
+
+= Authenticating with the Red Hat container registry
+
+[role="_abstract"]
+All container images available through the Red Hat container catalog are hosted on an image registry,
+`registry.redhat.io`.
+The registry requires authentication for access to images.
+
+To use the `registry.redhat.io` registry, you must have a Red Hat login.
+This is the same account that you use to log in to the Red Hat Customer Portal (access.redhat.com) and manage your Red Hat subscriptions.
+
+[NOTE]
+====
+If you are planning to install {ToolsName} in a container inside {VSCode},
+you must log in to `registry.redhat.io` before launching {VSCode} so that {VSCode} can pull the
+`devtools` container from `registry.redhat.io`.
+
+If you are running {ToolsName} on a container inside {VSCode} and you want to pull execution environments
+or the `devcontainer` to use as an execution environment,
+you must log in to `registry.redhat.io` from a terminal prompt within the `devcontainer` inside {VSCode}.
+====
+
+You can use the `podman login` or `docker login` commands with your credentials to access content on the registry.
+
+Podman::
++
+----
+$ podman login registry.redhat.io
+Username: my_redhat_username
+Password: ***********
+----
+Docker::
++
+----
+$ docker login registry.redhat.io
+Username: my_redhat_username
+Password: ***********
+----
+
+For more information about Red Hat container registry authentication, see
+link:https://access.redhat.com/RegistryAuthentication[Red Hat Container Registry Authentication]
+on the Red Hat Customer Portal.
+
+// * If you are an organization administrator, you can create profiles for users in your organization and configure Red Hat customer portal access permissions for them.
+// Refer to link:https://access.redhat.com/start/learn:get-set-red-hat/resource/resources:create-and-manage-other-users[Create and manage other users] on the Red Hat customer portal for information.
+// * If you are a member of an organization, ask your administrator to create a Red Hat customer portal account for you.
+//Troubleshooting link:https://access.redhat.com/articles/3560571[Troubleshooting Authentication Issues with `registry.redhat.io`]
+
diff --git a/downstream/modules/devtools/proc-devtools-testing-playbook.adoc b/downstream/modules/devtools/proc-devtools-testing-playbook.adoc
new file mode 100644
index 0000000000..4edecb7935
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-testing-playbook.adoc
@@ -0,0 +1,31 @@
+[id="test-playbook_{context}"]
+
+= Testing your playbooks
+
+[role="_abstract"]
+
+To test your playbooks in your project, run them in a non-production environment such as a lab setup or a virtual machine.
+
+{NavigatorStart} (`ansible-navigator`) is a text-based user interface (TUI) for developing and troubleshooting Ansible content with execution environments.
+
+Running a playbook using `ansible-navigator` generates verbose output that you can inspect to check whether the playbook is running the way you expected.
+
+You can specify the execution environment that you want to run your playbooks on, so that your tests replicate the production setup on {PlatformNameShort}:
+
+* To run a playbook on an execution environment, run the following command from the terminal in {VSCode}:
++
+----
+$ ansible-navigator run <playbook_name> --eei <execution_environment_image>
+----
++
+For example, to execute a playbook called `site.yml` on the {PlatformNameShort} RHEL 9 minimal execution environment, run the following command from the terminal in {VSCode}:
++
+----
+$ ansible-navigator run site.yml --eei ee-minimal-rhel9
+----
+
+The output is displayed in the terminal.
+You can inspect the results and step into each play and task that was executed.
+
+For more information about running playbooks, refer to
+link:{URLNavigatorGuide}/assembly-execute-playbooks-navigator_ansible-navigator[Running Ansible playbooks with automation content navigator]
+in the _{TitleNavigatorGuide}_ guide.
+
diff --git a/downstream/modules/devtools/proc-devtools-use-roles-collections-aap.adoc b/downstream/modules/devtools/proc-devtools-use-roles-collections-aap.adoc
new file mode 100644
index 0000000000..fc29b8dd2e
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-use-roles-collections-aap.adoc
@@ -0,0 +1,22 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="devtools-use-roles-collections-aap_{context}"]
+= Using your collection in projects in {PlatformName}
+
+To use your collection in {ControllerName}, you must add your collection to an
+{ExecEnvShort} and push it to {PrivateHubName}.
+
+The following procedure describes the workflow to add a collection to an {ExecEnvShort}.
+Refer to
+link:{URLBuilder}/assembly-publishing-exec-env#proc-customize-ee-image[Customizing an existing automation execution environment image]
+in the _{TitleBuilder}_ guide for the commands to execute these steps.
+
+. Pull an {ExecEnvShort} base image from {HubName}, or add your collection to your own custom {ExecEnvShort}.
+. Add the collections that you want to include in the {ExecEnvShort}.
+. Build the new {ExecEnvShort}.
+. Verify that the collections are in the {ExecEnvShort}.
+. Tag the image and push it to {PrivateHubName}.
+. Pull your new image into your {ControllerName} instance.
+
+Playbooks that use the roles in your collection must refer to the roles by their fully qualified collection name (FQCN).
+
diff --git a/downstream/modules/devtools/proc-devtools-working-with-ee.adoc b/downstream/modules/devtools/proc-devtools-working-with-ee.adoc
new file mode 100644
index 0000000000..e0f8491069
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-working-with-ee.adoc
@@ -0,0 +1,36 @@
+[id="working-with-ee_{context}"]
+
+= Working with execution environments
+
+[role="_abstract"]
+
+You can view the automation execution environments provided by Red Hat in the
+link:https://catalog.redhat.com/search?searchType=containers&build_categories_list=Automation%20execution%20environment&p=1[Red Hat Ecosystem Catalog].
+
+Click on an execution environment for information on how to download it.
+
+. Log in to `registry.redhat.io` if you have not already done so.
++
+[NOTE]
+====
+If you are running {ToolsName} on a container inside {VSCode} and you want to pull execution environments
+or the `devcontainer` to use as an execution environment,
+you must log in to `registry.redhat.io` from a terminal prompt within the `devcontainer` inside {VSCode}.
+====
+. Using the information in the
+link:https://catalog.redhat.com/search?searchType=containers&build_categories_list=Automation%20execution%20environment&p=1[Red Hat Ecosystem Catalog], download the execution environment you need.
++
+For example, to download the minimal RHEL 9 base image, run the following command:
++
+----
+$ podman pull registry.redhat.io/ansible-automation-platform-25/ee-minimal-rhel9
+----
+
+You can build custom execution environments with `ansible-builder`.
+For more information about working with execution environments locally, see
+link:{LinkBuilder}.
+
+After customizing your execution environment, you can push your new image to the container registry in automation hub. See
+link:{URLBuilder}/index#assembly-publishing-exec-env[Publishing an automation execution environment]
+in the _{TitleBuilder}_ documentation.
+
diff --git a/downstream/modules/devtools/proc-devtools-writing-first-playbook.adoc b/downstream/modules/devtools/proc-devtools-writing-first-playbook.adoc
new file mode 100644
index 0000000000..2a6c9c108f
--- /dev/null
+++ b/downstream/modules/devtools/proc-devtools-writing-first-playbook.adoc
@@ -0,0 +1,35 @@
+[id="writing-playbook_{context}"]
+
+= Writing your first playbook
+
+[role="_abstract"]
+The following instructions describe how {ToolsName} help you create and run your first playbook in {VSCode}.
+
+.Prerequisites
+
+* You have installed and opened the Ansible {VSCode} extension.
+* You have opened a terminal in {VSCode}.
+* You have installed `ansible-devtools`.
+
+.Procedure
+
+. Create a new `.yml` file in {VSCode} for your playbook, for example `example_playbook.yml`.
+Put it at the same directory level as the example `site.yml` file.
+. Add the following example code into the playbook file and save the file.
+The playbook consists of a single play that executes a `ping` to your local machine.
++
+----
+---
+- name: My first play
+  hosts: localhost
+  tasks:
+    - name: Ping my hosts
+      ansible.builtin.ping:
+
+----
++
+`ansible-lint` runs in the background and displays errors in the *Problems* tab of the terminal.
+There are no errors in this playbook:
++
+image::ansible-lint-no-errors.png[Ansible-lint showing no errors in a playbook]
+. Save your playbook file.
+
diff --git a/downstream/modules/devtools/proc-directory-setup.adoc b/downstream/modules/devtools/proc-directory-setup.adoc
index 6bf73ecd20..3deaa52afc 100644
--- a/downstream/modules/devtools/proc-directory-setup.adoc
+++ b/downstream/modules/devtools/proc-directory-setup.adoc
@@ -1,4 +1,4 @@
-[id="directory-setup"]
+[id="directory-setup_{context}"]
 
 = Setting up a directory for your playbooks
 
diff --git a/downstream/modules/devtools/proc-install-vscode-extension.adoc b/downstream/modules/devtools/proc-install-vscode-extension.adoc
deleted file mode 100644
index 1c40f624fe..0000000000
--- a/downstream/modules/devtools/proc-install-vscode-extension.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-[id="install-vscode-extension"]
-
-= Installing the Ansible {VSCode} extension
-
-[role="_abstract"]
-
-The Ansible extension adds language support for Ansible to {VSCode}.
-It incorporates {ToolsName} to facilitate creating and running automation content.
-
-For a full description of the Ansible extension, see the link:https://marketplace.visualstudio.com/items?itemName=redhat.ansible[Visual Studio Code Marketplace].
-
-// See link:URL[Learning path - Getting Started with the Ansible {VSCode} Extension] for interactive training on working with the extension.
- -To install the Ansible {VSCode} extension: - -. Click the *Extensions* icon in the {VSCode} Activity Bar, or select menu:View[Extensions], to display the *Extensions* view. -. In the search field in the *Extensions* view, type "Ansible Red Hat". -. Select the Ansible extension and click btn:[Install]. - -The Ansible extension becomes active when you open a workspace or directory that contains one of the following files: - -* Files with a `.yml`, `.yaml`, `.ansible.yml` or `.ansible.yaml` extension. -* Common YAML filenames recognized by Ansible, such as `site.yml` -* YAML files whose names contain "playbook". - -Open a `.yml` file in your workspace. The language identified for the file is displayed in the Status bar. - -When the language for a file is recognized as Ansible, the Ansible extension provides features for creating Ansible Playbooks and task files, such as auto-completion, hover, diagnostics, and goto. - - diff --git a/downstream/modules/devtools/proc-installing-vscode.adoc b/downstream/modules/devtools/proc-installing-vscode.adoc deleted file mode 100644 index a1b40b85a5..0000000000 --- a/downstream/modules/devtools/proc-installing-vscode.adoc +++ /dev/null @@ -1,10 +0,0 @@ -[id="installing-vscode_context"] - -= Installing {VSCode} - -[role="_abstract"] - -VS Code is a free open-source code editor available on Linux, Mac, and Windows. - -To install VS Code, follow the instructions on the link:https://code.visualstudio.com/download[Download Visual Studio Code page] in the Visual Studio Code documentation. - diff --git a/downstream/modules/devtools/proc-rhdh-add-additional-scm.adoc b/downstream/modules/devtools/proc-rhdh-add-additional-scm.adoc new file mode 100644 index 0000000000..485255619c --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-add-additional-scm.adoc @@ -0,0 +1,49 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-add-additional-scm_{context}"] += Adding additional Source Control Management options + +The standard Ansible plug-ins templates are preconfigured to support GitHub Cloud. +Follow the procedure below to add support for additional Source Control Management (SCM) solutions. + +.Procedure + +. Create a fork of the link:https://github.com/ansible/ansible-rhdh-templates/blob/main/all.yaml[Ansible plug-ins software templates repository]. +. In your repository, update the `enum` and `enumNames` keys with the SCM values. +. Update the software template `action` to match your SCM type. +. Register the forked repository with your customized templates in {RHDH}. + +For example, if you wanted to add GitLab as an SCM, your software template file would look similar to the following: + +---- +... +spec: + ... + parameters: + ... + properties: + sourceControl: + title: Select source control option + type: string + description: Select the source control option for your Ansible project. + default: gitlab.com + enum: + - gitlab.com + enumNames: + - 'GitLab' +... + +---- + +Under the `steps` section, use the appropriate action for your SCM: + +---- + steps: + ... + - id: publish + name: Publish + action: publish:gitlab + ... 
+ +---- + diff --git a/downstream/modules/devtools/proc-rhdh-add-custom-configmap.adoc b/downstream/modules/devtools/proc-rhdh-add-custom-configmap.adoc new file mode 100644 index 0000000000..c76d6e023c --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-add-custom-configmap.adoc @@ -0,0 +1,12 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-add-custom-configmap_{context}"] += Adding a custom ConfigMap + +Create a {RHDH} ConfigMap following the procedure in +link:{BaseURL}/red_hat_developer_hub/{RHDHVers}/html/administration_guide_for_red_hat_developer_hub/assembly-add-custom-app-file-openshift_admin-rhdh[Adding a custom application configuration file to Red Hat OpenShift Container Platform] +in the _Administration guide for Red Hat Developer Hub_. +The examples below use a custom ConfigMap named `app-config-rhdh` + +To edit your custom ConfigMap, log in to the OpenShift UI and navigate to menu:Select Project ( developerHubProj )[ConfigMaps > {developer-hub}-app-config > EditConfigMaps > app-config-rhdh]. + diff --git a/downstream/modules/devtools/proc-rhdh-add-devtools-container.adoc b/downstream/modules/devtools/proc-rhdh-add-devtools-container.adoc new file mode 100644 index 0000000000..3b129a1d8e --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-add-devtools-container.adoc @@ -0,0 +1,46 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-add-devtools-container_{context}"] += Adding the Ansible Developer Tools container + +You must update the Helm chart configuration to add an extra container. + +.Procedure + +. Log in to the OpenShift UI. +. Navigate to menu:Helm[developer-hub > Actions > upgrade > Yaml view] to open the Helm chart. +. Update the `extraContainers` section in the YAML file. ++ +Add the following code: ++ +---- +upstream: + backstage: + ... + extraContainers: + - command: + - adt + - server + image: >- + registry.redhat.io/ansible-automation-platform-25/ansible-dev-tools-rhel8:latest + imagePullPolicy: IfNotPresent + name: ansible-devtools-server + ports: + - containerPort: 8000 + ... +---- ++ +[NOTE] +==== +The image pull policy is `imagePullPolicy: IfNotPresent`. +The image is pulled only if it does not already exist on the node. +Update it to `imagePullPolicy: Always` if you always want to use the latest image. +==== +. Click btn:[Upgrade]. + +.Verification + +To verify that the container is running, check the container log: + +image::rhdh-check-devtools-container.png[View container log] + diff --git a/downstream/modules/devtools/proc-rhdh-add-plugin-config.adoc b/downstream/modules/devtools/proc-rhdh-add-plugin-config.adoc new file mode 100644 index 0000000000..fb270699db --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-add-plugin-config.adoc @@ -0,0 +1,76 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-add-plugin-config_{context}"] += Adding the Ansible plug-ins configuration + +. In the OpenShift Developer UI, navigate to menu:Helm[developer-hub > Actions > Upgrade > Yaml view]. +. Update the Helm chart configuration to add the dynamic plug-ins in the {RHDH} instance. +Under the `plugins` section in the YAML file, add the dynamic plug-ins that you want to enable. ++ +---- +global: + ... 
+ plugins: + - disabled: false + integrity: + package: 'http://plugin-registry:8080/ansible-plugin-backstage-rhaap-x.y.z.tgz' + pluginConfig: + dynamicPlugins: + frontend: + ansible.plugin-backstage-rhaap: + appIcons: + - importName: AnsibleLogo + name: AnsibleLogo + dynamicRoutes: + - importName: AnsiblePage + menuItem: + icon: AnsibleLogo + text: Ansible + path: /ansible + - disabled: false + integrity: + package: >- + http://plugin-registry:8080/ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz + pluginConfig: + dynamicPlugins: + backend: + ansible.plugin-scaffolder-backend-module-backstage-rhaap: null + - disabled: false + integrity: + package: >- + http://plugin-registry:8080/ansible-plugin-backstage-rhaap-backend-x.y.z.tgz + pluginConfig: + dynamicPlugins: + backend: + ansible.plugin-backstage-rhaap-backend: null +---- +. In the `package` sections, replace `x.y.z` in the plug-in filenames with the correct version numbers for the Ansible plug-ins. +. For each Ansible plug-in, update the integrity values using the corresponding `.integrity` file content. +. Click btn:[Upgrade]. ++ +The developer hub pods restart and the plug-ins are installed. + +.Verification + +To verify that the plug-ins have been installed, open the `install-dynamic-plugin` container logs and check that the Ansible plug-ins are visible in {RHDH}: + +. Open the Developer perspective for the {RHDH} application in the OpenShift Web console. +. Select the *Topology* view. +. Select the {RHDH} deployment pod to open an information pane. +. Select the *Resources* tab of the information pane. +. In the *Pods* section, click *View logs* to open the *Pod details* page. +. In the *Pod details* page, select the *Logs* tab. +. Select `install-dynamic-plugins` from the drop-down list of containers to view the container log. +. In the `install-dynamic-plugin` container logs, search for the Ansible plug-ins. ++ +The following example from the log indicates a successful installation for one of the plug-ins: ++ +----- +=> Successfully installed dynamic plugin http://plugin-registry-1:8080/ansible-plugin-backstage-rhaap-1.1.0.tgz +----- ++ +The following image shows the container log in the *Pod details* page. +The version numbers and file names can differ. ++ +image::rhdh-check-plugin-config.png[container logs for install-dynamic-plugin] + diff --git a/downstream/modules/devtools/proc-rhdh-add-plugin-software-templates.adoc b/downstream/modules/devtools/proc-rhdh-add-plugin-software-templates.adoc new file mode 100644 index 0000000000..e6e8592681 --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-add-plugin-software-templates.adoc @@ -0,0 +1,28 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-add-plugin-software-templates_{context}"] += Adding Ansible plug-ins software templates + +Red Hat Ansible provides software templates for {RHDH} to provision new playbooks and collection projects based on Ansible best practices. + +.Procedure + +. Edit your custom {RHDH} config map, for example `app-config-rhdh`. +. Add the following code to your {RHDH} `app-config-rhdh.yaml` file. +---- +data: + app-config-rhdh.yaml: | + catalog: + ... + locations: + ... 
+      - type: url
+        target: https://github.com/ansible/ansible-rhdh-templates/blob/main/all.yaml
+        rules:
+          - allow: [Template]
+----
+
+For more information, refer to the
+link:{BaseURL}/red_hat_developer_hub/{RHDHVers}/html-single/administration_guide_for_red_hat_developer_hub/assembly-admin-templates#assembly-admin-templates[Managing templates]
+section of the _Administration guide for Red Hat Developer Hub_.
+
diff --git a/downstream/modules/devtools/proc-rhdh-add-pull-secret-helm.adoc b/downstream/modules/devtools/proc-rhdh-add-pull-secret-helm.adoc
new file mode 100644
index 0000000000..8b17784177
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-add-pull-secret-helm.adoc
@@ -0,0 +1,30 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-add-pull-secret-helm_{context}"]
+= Adding a pull secret to the {RHDH} Helm configuration
+
+.Prerequisite
+
+The Ansible Development Container download requires a Red Hat Customer Portal account and a Red Hat Registry Service account.
+
+.Procedure
+
+. Create a new link:https://access.redhat.com/terms-based-registry/[Red Hat Registry Service account], if required.
+. Click the token name under the *Account name* column.
+. Select the *OpenShift Secret* tab and follow the instructions to add the pull secret to your {RHDH} OpenShift project.
+. Add the new secret to the {RHDH} Helm configuration, replacing `<pull_secret_name>` with the name of the secret you generated on the Red Hat Registry Service Account website:
++
+----
+upstream:
+  backstage:
+    ...
+    image:
+      ...
+    pullSecrets:
+      - <pull_secret_name>
+    ...
+
+----
+
+For more information, refer to the link:https://access.redhat.com/RegistryAuthentication[Red Hat Container Registry documentation].
+
diff --git a/downstream/modules/devtools/proc-rhdh-backup-operator-configmap.adoc b/downstream/modules/devtools/proc-rhdh-backup-operator-configmap.adoc
new file mode 100644
index 0000000000..748ee90ee3
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-backup-operator-configmap.adoc
@@ -0,0 +1,26 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-backup-operator-configmap_{context}"]
+= Backing up your {RHDHShort} Operator ConfigMap
+
+Before you install {AAPRHDH}, create a local copy of the ConfigMap for the {RHDHShort} Operator.
+You can use a section of the ConfigMap when you are populating a custom ConfigMap.
+
+.Procedure
+
+// Is export KUBECONFIG=/home/secrets/rosa/kubeconfig needed?
+
+. Find the namespace for your {RHDHShort} Operator.
++
+When you installed the {RHDHShort} Operator, a namespace was created for it.
+Select *Topology* and look for the {RHDHShort} Operator in the *Project* dropdown list.
+The default namespace is `rhdh-operator`.
+. Run the following command to make a copy of the ConfigMap for your {RHDHShort} Operator, `backstage-default-config`.
++
+Replace `<operator_namespace>` with your {RHDHShort} Operator namespace, and `<backup_filename>` with
+the filename you want to use for your copy of the {RHDHShort} Operator ConfigMap.
++
+----
+$ oc get configmap backstage-default-config -n <operator_namespace> -o yaml > <backup_filename>
+----
+
diff --git a/downstream/modules/devtools/proc-rhdh-configure-aap-details.adoc b/downstream/modules/devtools/proc-rhdh-configure-aap-details.adoc
new file mode 100644
index 0000000000..26016c72ac
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-configure-aap-details.adoc
@@ -0,0 +1,48 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-configure-aap-details_{context}"]
+= Configuring Ansible Automation Platform details
+
+The Ansible plug-ins query your {PlatformNameShort} subscription status with the controller API using a token.
+
+[NOTE]
+====
+The Ansible plug-ins continue to function regardless of the {PlatformNameShort} subscription status.
+====
+
+.Procedure
+
+. Create a Personal Access Token (PAT) with "Read" scope in automation controller, following the
+link:{URLCentralAuth}/gw-token-based-authentication#assembly-controller-applications[Applications]
+section of _{TitleCentralAuth}_.
+// 2.4 link:
+// link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/assembly-controller-applications#proc-controller-apps-create-tokens[Adding tokens]
+// 2.4 link:
+// section of the _Automation controller user guide_.
+. Edit your custom {RHDH} config map, for example `app-config-rhdh`.
+. Add your {PlatformNameShort} details to `app-config-rhdh.yaml`.
+.. Set the `baseUrl` key to your automation controller URL.
+.. Set the `token` key to the token value that you generated in Step 1.
+.. Set the `checkSSL` key to `true` or `false`.
++
+If `checkSSL` is set to `true`, the Ansible plug-ins verify whether the SSL certificate is valid.
++
+----
+data:
+  app-config-rhdh.yaml: |
+    ...
+    ansible:
+      ...
+      rhaap:
+        baseUrl: '<https://aap_url>'
+        token: '<token_value>'
+        checkSSL: true
+----
+
+[NOTE]
+====
+You are responsible for protecting your {RHDH} installation from external and unauthorized access.
+Manage the backend authentication key like any other secret.
+Meet strong password requirements, do not expose it in any configuration files, and only inject it into configuration files as an environment variable.
+====
+
diff --git a/downstream/modules/devtools/proc-rhdh-configure-devspaces.adoc b/downstream/modules/devtools/proc-rhdh-configure-devspaces.adoc
new file mode 100644
index 0000000000..3904f731e8
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-configure-devspaces.adoc
@@ -0,0 +1,44 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-configure-devspaces_{context}"]
+= Configuring OpenShift Dev Spaces
+
+When OpenShift Dev Spaces is configured for the Ansible plug-ins, users can click a link from the catalog item view in {RHDH} and edit their provisioned Ansible Git projects using Dev Spaces.
+
+[NOTE]
+====
+OpenShift Dev Spaces is optional; the plug-ins function without it.
+It is a separate Red Hat product and is not included in the {PlatformNameShort} or {RHDH} subscription.
+====
+
+If the OpenShift Dev Spaces link is not configured in the Ansible plug-ins,
+the *Go to OpenShift Dev Spaces dashboard* link in the *DEVELOP* section of the Ansible plug-ins landing page redirects users to the
+link:https://www.redhat.com/en/technologies/management/ansible/development-tools[Ansible development tools home page].
+
+.Prerequisites
+
+* A Dev Spaces installation.
+Refer to the
+link:{BaseURL}/red_hat_openshift_dev_spaces/3.14/html-single/administration_guide/installing-devspaces[Installing Dev Spaces]
+section of the _Red Hat OpenShift Dev Spaces Administration guide_.
+
+.Procedure
+
+. Edit your custom {RHDH} config map, for example `app-config-rhdh`.
+. Add the following code to your {RHDH} `app-config-rhdh.yaml` file.
++
+----
+data:
+  app-config-rhdh.yaml: |-
+    ansible:
+      devSpaces:
+        baseUrl: >-
+          https://<devspaces_url>
+----
+. Replace `<devspaces_url>` with your OpenShift Dev Spaces URL.
+. In the OpenShift Developer UI, select the `Red Hat Developer Hub` pod.
+. Open *Actions*.
+. Click *Restart rollout*.
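+
+If you prefer the command line, you can perform the same restart with `oc`; a minimal sketch, assuming the default Helm deployment name `developer-hub` and your own project namespace:
+
+----
+$ oc rollout restart deployment/developer-hub -n <rhdh_namespace>
+$ oc rollout status deployment/developer-hub -n <rhdh_namespace>
+----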
+
+
diff --git a/downstream/modules/devtools/proc-rhdh-configure-devtools-server.adoc b/downstream/modules/devtools/proc-rhdh-configure-devtools-server.adoc
new file mode 100644
index 0000000000..81ce7f9044
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-configure-devtools-server.adoc
@@ -0,0 +1,29 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-configure-devtools-server_{context}"]
+= Configuring the Ansible Dev Tools Server
+
+The `creatorService` URL is required for the Ansible plug-ins to provision new projects using the provided software templates.
+
+.Procedure
+
+. Edit your custom {RHDH} config map, `app-config-rhdh`, that you created in
+xref:rhdh-add-custom-configmap_rhdh-ocp-required-installation[Adding a custom ConfigMap].
+. Add the following code to your {RHDH} `app-config-rhdh.yaml` file.
++
+----
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: app-config-rhdh
+...
+data:
+  app-config-rhdh.yaml: |-
+    ansible:
+      creatorService:
+        baseUrl: 127.0.0.1
+        port: '8000'
+...
+
+----
+
diff --git a/downstream/modules/devtools/proc-rhdh-configure-optional-integrations.adoc b/downstream/modules/devtools/proc-rhdh-configure-optional-integrations.adoc
new file mode 100644
index 0000000000..62ded10c5f
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-configure-optional-integrations.adoc
@@ -0,0 +1,13 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-configure-optional-integrations_{context}"]
+= Configuring Ansible plug-ins optional integrations
+
+The Ansible plug-ins provide integrations with {PlatformNameShort} and other optional Red Hat products.
+
+// Create a custom ConfigMap called `app-config-rhdh` as outlined in the
+// link:{BaseURL}/red_hat_developer_hub/1.2/html-single/administration_guide_for_red_hat_developer_hub/assembly-install-rhdh-ocp#proc-add-custom-app-file-openshift-helm_assembly-install-rhdh-ocp[Adding a custom application configuration file to OpenShift Container Platform using the Helm chart] of the _Administration guide for Red Hat Developer Hub_.
+//
+
+To edit your custom ConfigMap, log in to the OpenShift UI and navigate to menu:Select Project ( developerHubProj )[ConfigMaps > {developer-hub}-app-config-rhdh > app-config-rhdh].
+
diff --git a/downstream/modules/devtools/proc-rhdh-configure-pah-url.adoc b/downstream/modules/devtools/proc-rhdh-configure-pah-url.adoc
new file mode 100644
index 0000000000..218af264d3
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-configure-pah-url.adoc
@@ -0,0 +1,42 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-configure-pah-url_{context}"]
+= Configuring the private automation hub URL
+
+{PrivateHubNameStart} provides a centralized, on-premise repository for certified Ansible collections, execution environments, and any additional vetted content provided by your organization.
+
+If the {PrivateHubName} URL is not configured in the Ansible plug-ins, users are redirected to the
+link:https://console.redhat.com/ansible/automation-hub[Red Hat Hybrid Cloud Console automation hub].
+
+[NOTE]
+====
+The {PrivateHubName} configuration is optional but recommended.
+The Ansible plug-ins will function without it.
+====
+
+.Prerequisites
+
+* A {PrivateHubName} instance.
++
+For more information on installing {PrivateHubName}, refer to the installation guides in the
+link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}[{PlatformNameShort} documentation].
+
+.Procedure
+
+. Edit your custom {RHDH} config map, for example `app-config-rhdh`.
+. Add the following code to your {RHDH} `app-config-rhdh.yaml` file.
++
+----
+data:
+  app-config-rhdh.yaml: |-
+    ansible:
+      ...
+      automationHub:
+        baseUrl: '<https://MyOwnPAHUrl/>'
+      ...
+
+----
+. Replace `<\https://MyOwnPAHUrl/>` with your {PrivateHubName} URL.
+. In the OpenShift Developer UI, select the `Red Hat Developer Hub` pod.
+. Open *Actions*.
+. Click *Restart rollout*.
+
diff --git a/downstream/modules/devtools/proc-rhdh-configure-rbac.adoc b/downstream/modules/devtools/proc-rhdh-configure-rbac.adoc
new file mode 100644
index 0000000000..820d358f13
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-configure-rbac.adoc
@@ -0,0 +1,35 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-configure-rbac_{context}"]
+= Configuring Role Based Access Control
+
+{RHDH} offers Role-based Access Control (RBAC) functionality that you can apply to the Ansible plug-ins content.
+
+Assign the following roles:
+
+* Members of the `admin:superUsers` group can select templates in the *Create* tab of the Ansible plug-ins to create playbook and collection projects.
+* Members of the `admin:users` group can view templates in the *Create* tab of the Ansible plug-ins.
+
+The following example adds RBAC to {RHDH}.
+
+----
+data:
+  app-config-rhdh.yaml: |
+    plugins:
+      ...
+    permission:
+      enabled: true
+      rbac:
+        admin:
+          users:
+            - name: user:default/<user_name>
+          superUsers:
+            - name: user:default/<user_name>
+----
+
+For more information about permission policies and managing RBAC, refer to the
+link:{BaseURL}/red_hat_developer_hub/{RHDHVers}/html-single/authorization/index[_Authorization_]
+guide for Red Hat Developer Hub.
+
diff --git a/downstream/modules/devtools/proc-rhdh-create-custom-configmap-operator-install.adoc b/downstream/modules/devtools/proc-rhdh-create-custom-configmap-operator-install.adoc
new file mode 100644
index 0000000000..589cae3465
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-create-custom-configmap-operator-install.adoc
@@ -0,0 +1,72 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-create-operator-custom-configmap-operator-install_{context}"]
+= Creating a custom Operator ConfigMap
+
+Create a custom ConfigMap, for instance `rhdh-custom-config`, for your project.
+For more details about creating a custom ConfigMap, see
+link:{BaseURL}/red_hat_developer_hub/{RHDHVers}/html-single/administration_guide_for_red_hat_developer_hub/index#proc-add-custom-app-config-file-ocp-operator_admin-rhdh[Adding a custom application configuration file to OpenShift Container Platform using the Operator]
+in the _Administration guide for Red Hat Developer Hub_.
+
+Populate the ConfigMap with YAML from the backup that you made of the {RHDHShort} Operator ConfigMap.
+// This enables the dynamic plug-ins specific to the backstage showcase.
+
+.Prerequisites
+
+* You have saved a backup copy of the ConfigMap for the {RHDHShort} Operator.
+
+.Procedure
+
+. In the OpenShift web console, navigate to the project you created.
+. Click *ConfigMaps* in the navigation pane.
+. Click *Create ConfigMap*.
+. Replace the default YAML code in the new ConfigMap with the following code:
++
+----
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: rhdh-custom-config
+data:
+  deployment.yaml: |-
+    # Replace with RHDH Operator ConfigMap deployment.yaml block here
+----
+. Copy the `deployment.yaml:` section from your local copy of the RHDH Operator ConfigMap.
+. Paste the `deployment.yaml:` section into the `rhdh-custom-config` ConfigMap, replacing the `deployment.yaml:` line.
+. Add a sidecar container (`ansible-devtools-server`) to the list of containers under `resources` in the `deployment.spec.template.spec.[containers]` block of the ConfigMap:
++
+----
+    spec:
+      replicas: 1
+      selector:
+        matchLabels:
+          rhdh.redhat.com/app:
+      template:
+        metadata:
+          labels:
+            rhdh.redhat.com/app:
+        spec:
+          ...
+          containers:
+            - name: backstage-backend
+              ...
+            - resources: {} # Add sidecar container for Ansible plug-ins
+              terminationMessagePath: /dev/termination-log
+              name: ansible-devtools-server
+              command:
+                - adt
+                - server
+              ports:
+                - containerPort: 8000
+                  protocol: TCP
+              imagePullPolicy: IfNotPresent
+              terminationMessagePolicy: File
+              image: 'ghcr.io/ansible/community-ansible-dev-tools:latest'
+
+----
+. Click btn:[Create] to create the ConfigMap.
+
+.Verification
+
+To view your new ConfigMap, click *ConfigMaps* in the navigation pane.
+
diff --git a/downstream/modules/devtools/proc-rhdh-create-plugin-registry.adoc b/downstream/modules/devtools/proc-rhdh-create-plugin-registry.adoc
new file mode 100644
index 0000000000..68b6d3d8fa
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-create-plugin-registry.adoc
@@ -0,0 +1,45 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-create-plugin-registry_{context}"]
+= Creating a registry for the {AAPRHDHShort}
+
+Set up a registry in your OpenShift cluster to host the {AAPRHDHShort} and make them available for installation in {RHDH} ({RHDHShort}).
+
+.Procedure
+
+. Log in to your {OCPShort} instance with credentials to create a new application.
+. Open your {RHDH} OpenShift project.
++
+----
+$ oc project <rhdh_project_name>
+----
+. Run the following commands to create a plug-in registry build in the OpenShift cluster.
++
+----
+$ oc new-build httpd --name=plugin-registry --binary
+$ oc start-build plugin-registry --from-dir=$DYNAMIC_PLUGIN_ROOT_DIR --wait
+$ oc new-app --image-stream=plugin-registry
+----
+
+.Verification
+
+To verify that the plugin-registry was deployed successfully, open the *Topology* view in the *Developer* perspective on the {RHDH} application in the OpenShift Web console.
+
+. Click the plug-in registry to view the log.
++
+image::rhdh-plugin-registry.png[Developer perspective]
++
+(1) Developer hub instance
++
+(2) Plug-in registry
+. Click the terminal tab and log in to the container.
+. In the terminal, run `ls` to confirm that the `.tgz` plug-in files are in the plug-in registry.
++
+----
+ansible-plugin-backstage-rhaap-x.y.z.tgz
+ansible-plugin-backstage-rhaap-backend-x.y.z.tgz
+ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz
+----
++
+The version numbers and file names can differ.
+
diff --git a/downstream/modules/devtools/proc-rhdh-create.adoc b/downstream/modules/devtools/proc-rhdh-create.adoc
new file mode 100644
index 0000000000..a26d4155d0
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-create.adoc
@@ -0,0 +1,60 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-create_{context}"]
+= Creating a project
+
+.Prerequisite
+
+* Ensure you have the correct access (RBAC) to view the templates in {RHDH}.
+Ask your administrator to assign access to you if necessary.
+
+.Procedure
+
+. Log in to your {RHDH} UI.
+. Click the Ansible `A` icon in the {RHDH} navigation panel.
+. Navigate to the *Overview* page.
+. Click *Create*.
+. Click *Create Ansible Git Project*. The *Available Templates* page opens.
+. Click *Create Ansible Playbook project*.
+. In the *Create Ansible Playbook Project* page, enter information for your new project in the form.
++
+You can see sample values for this form in the Example project.
++
+[options="header"]
+|===
+|Field |Description
+|Source code repository organization name or username
+|The name of your source code repository username or organization name
+|Playbook repository name
+|The name of your new Git repository
+|Playbook description (Optional)
+|A description of the new playbook project
+|Playbook project's collection namespace
+|The new playbook Git project creates an example collection folder for you.
+Enter a value for the collection namespace.
+|Playbook project's collection name
+|The name of the collection
+|Catalog Owner Name
+|The name of the Developer Hub catalog item owner.
+This is a Red Hat Developer Hub field.
+|System (Optional)
+|This is a {RHDH} field
+|===
++
+[NOTE]
+====
+Collection namespaces must follow Python module naming conventions.
+Collections must have short, all lowercase names.
+You can use underscores in the collection name if it improves readability.
+
+For more information, see the link:https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_in_groups.html#naming-conventions[Ansible Collection naming conventions documentation].
+====
+. Click *Review*.
+
diff --git a/downstream/modules/devtools/proc-rhdh-develop-projects-devspaces.adoc b/downstream/modules/devtools/proc-rhdh-develop-projects-devspaces.adoc
new file mode 100644
index 0000000000..3c2af0cc61
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-develop-projects-devspaces.adoc
@@ -0,0 +1,14 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-develop-projects-devspaces_{context}"]
+= Developing projects on Dev Spaces
+
+link:https://access.redhat.com/products/red-hat-openshift-dev-spaces[OpenShift Dev Spaces]
+is not included with your {PlatformNameShort} subscription or the {AAPRHDH}.
+
+The plug-ins provide context-aware links to edit your project in Dev Spaces.
+
+The Dev Spaces instance provides a default configuration that installs the Ansible VS Code extension and provides the Ansible command line tools.
+You can activate Ansible Lightspeed in the Ansible VS Code extension. For more information, refer to the
+link:{BaseURL}/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant/2.x_latest/html-single/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant_user_guide/index[Red Hat Ansible Lightspeed with IBM watsonx Code Assistant User Guide].
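+
+To confirm that the command line tools are available in your workspace, you can check their versions from the Dev Spaces IDE terminal; a quick sanity check, assuming the default workspace configuration:
+
+----
+$ ansible-navigator --version
+$ ansible-lint --version
+----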
+
+
diff --git a/downstream/modules/devtools/proc-rhdh-develop-projects.adoc b/downstream/modules/devtools/proc-rhdh-develop-projects.adoc
new file mode 100644
index 0000000000..88b82333df
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-develop-projects.adoc
@@ -0,0 +1,5 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-develop-projects_{context}"]
+= Developing projects
+
diff --git a/downstream/modules/devtools/proc-rhdh-devtools-sidecar.adoc b/downstream/modules/devtools/proc-rhdh-devtools-sidecar.adoc
new file mode 100644
index 0000000000..a8ef90fb61
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-devtools-sidecar.adoc
@@ -0,0 +1,7 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-devtools-sidecar_{context}"]
+= Adding the Ansible Development Tools sidecar container
+
+After the plug-ins are loaded, add the Ansible Development Container (`ansible-devtools-server`) to the {RHDH} pod as a sidecar container.
+
diff --git a/downstream/modules/devtools/proc-rhdh-download-plugins.adoc b/downstream/modules/devtools/proc-rhdh-download-plugins.adoc
new file mode 100644
index 0000000000..b9ccfcd083
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-download-plugins.adoc
@@ -0,0 +1,45 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-download-plugins_{context}"]
+= Downloading the Ansible plug-ins files
+
+
+. Download the latest `.tar` file for the plug-ins from the link:{PlatformDownloadUrl}[Red Hat Ansible Automation Platform Product Software downloads page].
+The format of the filename is `ansible-backstage-rhaap-bundle-x.y.z.tar.gz`.
+Substitute the Ansible plug-ins release version, for example `1.0.0`, for `x.y.z`.
+. Create a directory on your local machine to store the `.tar` files.
++
+----
+$ mkdir /path/to/<directory_name>
+----
+. Set an environment variable (`$DYNAMIC_PLUGIN_ROOT_DIR`) to represent the directory path.
++
+----
+$ export DYNAMIC_PLUGIN_ROOT_DIR=/path/to/<directory_name>
+----
+. Extract the `ansible-backstage-rhaap-bundle-x.y.z.tar.gz` contents to `$DYNAMIC_PLUGIN_ROOT_DIR`.
++
+----
+$ tar --exclude='*code*' -xzf ansible-backstage-rhaap-bundle-x.y.z.tar.gz -C $DYNAMIC_PLUGIN_ROOT_DIR
+----
++
+Substitute the Ansible plug-ins release version, for example `1.0.0`, for `x.y.z`.
+
+.Verification
+
+Run `ls` to verify that the extracted files are in the `$DYNAMIC_PLUGIN_ROOT_DIR` directory:
+
+----
+$ ls $DYNAMIC_PLUGIN_ROOT_DIR
+ansible-plugin-backstage-rhaap-x.y.z.tgz
+ansible-plugin-backstage-rhaap-x.y.z.tgz.integrity
+ansible-plugin-backstage-rhaap-backend-x.y.z.tgz
+ansible-plugin-backstage-rhaap-backend-x.y.z.tgz.integrity
+ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz
+ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz.integrity
+
+----
+
+The files with the `.integrity` file type contain the plug-in SHA value.
+The SHA value is used during the plug-in configuration.
+
diff --git a/downstream/modules/devtools/proc-rhdh-enable-rhdh-authentication.adoc b/downstream/modules/devtools/proc-rhdh-enable-rhdh-authentication.adoc
new file mode 100644
index 0000000000..c461aefbf6
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-enable-rhdh-authentication.adoc
@@ -0,0 +1,12 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-enable-rhdh-authentication_{context}"]
+= Enabling {RHDH} authentication
+
+{RHDH} provides integrations for multiple Source Control Management (SCM) systems.
+The plug-ins require this integration to create repositories.
+ +Refer to the +link:{BaseURL}/red_hat_developer_hub/1.2/html-single/administration_guide_for_red_hat_developer_hub/index#enabling-authentication[Enabling authentication in Red Hat Developer Hub] +chapter of the _Administration guide for Red Hat Developer Hub_. + diff --git a/downstream/modules/devtools/proc-rhdh-execute-automation-devspaces.adoc b/downstream/modules/devtools/proc-rhdh-execute-automation-devspaces.adoc new file mode 100644 index 0000000000..1f388c8245 --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-execute-automation-devspaces.adoc @@ -0,0 +1,12 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-execute-automation-devspaces_{context}"] += Executing automation tasks in Dev Spaces + +The default configuration for Dev Spaces provides access to the Ansible command line tools. + +To execute an automation task in Dev Spaces from the VSCode user interface, +right-click a playbook name in the list of files and select *Run Ansible Playbook via ansible-navigator run* or *Run playbook via ansible-playbook*. + +image::rhdh-vscode-run-playbook.png[Run a playbook from VS Code] + diff --git a/downstream/modules/devtools/proc-rhdh-firewall-example-create-playbook.adoc b/downstream/modules/devtools/proc-rhdh-firewall-example-create-playbook.adoc new file mode 100644 index 0000000000..b19d16cfd4 --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-firewall-example-create-playbook.adoc @@ -0,0 +1,51 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-firewall-example-create-playbook_{context}"] += Create a new playbook project to configure a firewall + +Use the Ansible plug-ins to create a new Ansible Playbook project. + +. Click the Ansible `A` icon in the {RHDH} navigation panel. +. From the *Create* dropdown menu on the landing page, select *Create Ansible Git Project*. +. Click *Choose* in the *Create Ansible Playbook Project* software template. +. Fill in the following information in the *Create Ansible Playbook Project* page: + +[cols="3,1,3,3" options="header"] +|=== +|Field |Required |Description |Example value +|Source code repository organization name or username +|Yes +|The name of your source code repository username or organization name. +|`my_github_username` +|Playbook repository name +|Yes +|The name of your new Git repository. +|`rhel_firewall_config` +|Playbook description +|No +|A description of the new playbook project. +|`This playbook configures firewalls on Red Hat Enterprise Linux systems` +|Playbook project's collection namespace +|Yes +|The new playbook Git project creates an example collection folder for you. +Enter a value for the collection namespace. +|`my_galaxy_username` +|Playbook project's collection name +|Yes +|This is the name of the example collection. +|`rhel_firewall_config` +|Catalog Owner Name +|Yes +|The name of the Developer Hub catalog item owner. It is a Red Hat Developer Hub field. +|`my_rhdh_username` +|System +|No +|This is a Red Hat Developer Hub field. +|`my_rhdh_linux_system` +|=== + +[start=5] +. Click *Review*. +. Click *Create* to provision your new playbook project. +. Click *Open in catalog* to view your project. 
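+
+If you also want to work with the provisioned project locally, you can clone the new repository; the URL below is a sketch that assumes the example values used in this procedure:
+
+----
+$ git clone https://github.com/my_github_username/rhel_firewall_config.git
+----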
+
+
diff --git a/downstream/modules/devtools/proc-rhdh-firewall-example-discover.adoc b/downstream/modules/devtools/proc-rhdh-firewall-example-discover.adoc
new file mode 100644
index 0000000000..03eec004a3
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-firewall-example-discover.adoc
@@ -0,0 +1,27 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-firewall-example-discover_{context}"]
+= Discovering existing Ansible content for RHEL system roles
+
+Red Hat recommends that you use trusted automation content that has been tested and approved by Red Hat or your organization.
+
+{HubNameStart} is a central repository for discovering, downloading, and managing trusted content collections from Red Hat and its partners.
+{PrivateHubNameStart} provides an on-premise solution for managing content collections.
+
+. Click the Ansible `A` icon in the {RHDH} navigation panel.
+. Click *Discover existing collections*.
+. Click *Go to Automation Hub*.
++
+--
+** If {PrivateHubName} has been configured in the Ansible plug-ins, you are redirected to your {PrivateHubName} instance.
+** If {PrivateHubName} has not been configured in the Ansible plug-ins installation configuration,
+you are redirected to the Red Hat Hybrid Cloud Console (RHCC) automation hub.
+--
+In this example, you are redirected to the RHCC automation hub.
+. If you are prompted to log in, provide your Red Hat Customer Portal credentials.
+. Filter the collections with the `rhel firewall` keywords.
++
+The search returns the `rhel_system_roles` collection.
+
+The RHEL System Roles collection contains certified Ansible content that you can reuse to configure your firewall.
+
diff --git a/downstream/modules/devtools/proc-rhdh-firewall-example-edit.adoc b/downstream/modules/devtools/proc-rhdh-firewall-example-edit.adoc
new file mode 100644
index 0000000000..f4c43e22d4
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-firewall-example-edit.adoc
@@ -0,0 +1,27 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-firewall-example-edit_{context}"]
+= Editing your firewall playbook project
+
+The Ansible plug-ins integrate with OpenShift Dev Spaces for editing your Ansible projects.
+OpenShift Dev Spaces provides on-demand, web-based Integrated Development Environments (IDEs).
+
+Ansible Git projects provisioned using the Ansible plug-ins include best practice configurations for OpenShift Dev Spaces.
+These configurations include installing the Ansible VS Code extension and providing access from the IDE terminal to Ansible development tools,
+such as Ansible Navigator and Ansible Lint.
+
+[NOTE]
+====
+OpenShift Dev Spaces is optional; it is not required to run the Ansible plug-ins.
+It is a separate Red Hat product and it is not included in the {PlatformNameShort} or {RHDH} subscription.
+====
+
+This example assumes that OpenShift Dev Spaces has been configured in the Ansible plug-ins installation.
+
+.Procedure
+
+* In the *catalog item* view of your playbook project, click *Open Ansible project in OpenShift Dev Spaces*.
++
+A VS Code instance of OpenShift Dev Spaces opens in a new browser tab.
+It automatically loads your new Ansible Playbook Git project.
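+
+Before you run the firewall playbook that you create later in this example, you can make the discovered collection available to your environment; a sketch, assuming `ansible-galaxy` is configured with a Galaxy server that provides certified content:
+
+----
+$ ansible-galaxy collection install redhat.rhel_system_roles
+----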
+ diff --git a/downstream/modules/devtools/proc-rhdh-firewall-example-learn.adoc b/downstream/modules/devtools/proc-rhdh-firewall-example-learn.adoc new file mode 100644 index 0000000000..f9e350dbaa --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-firewall-example-learn.adoc @@ -0,0 +1,13 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-firewall-example-learn_{context}"] += Learning more about playbooks + +The first step is to learn more about Ansible playbooks using the available learning paths. + +. Click the Ansible `A` icon in the {RHDH} navigation panel. +. Click *Learn* and select the *Getting Started with Ansible Playbooks* learning path. +This redirects you to the Red Hat Developer website. +. If you are prompted to log in, create a Red Hat Developer account, or enter your details. +. Complete the learning path. + diff --git a/downstream/modules/devtools/proc-rhdh-firewall-example-new-playbook.adoc b/downstream/modules/devtools/proc-rhdh-firewall-example-new-playbook.adoc new file mode 100644 index 0000000000..9dd295201c --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-firewall-example-new-playbook.adoc @@ -0,0 +1,41 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-firewall-example-new-playbook_{context}"] += Creating a new playbook to automate the firewall configuration + +Create a new playbook and use the RHEL System Role collection to automate your {RHEL} firewall configuration. + +. In your Dev Spaces instance, click menu:File[New File]. +. Enter `firewall.yml` for the filename and click *OK* to save it in the root directory. +. Add the following lines to your `firewall.yml` file: ++ +---- +--- +- name: Open HTTPS and SSH on firewall + hosts: rhel + become: true + tasks: + - name: Use rhel system roles to allow https and ssh traffic + vars: + firewall: + - service: https + state: enabled + permanent: true + immediate: true + zone: public + - service: ssh + state: enabled + permanent: true + immediate: true + zone: public + ansible.builtin.include_role: + name: redhat.rhel_system_roles.firewall +---- + +[NOTE] +==== +You can use Ansible Lightspeed with IBM watsonx Code Assistant from the Ansible VS Code extension to help you generate playbooks. +For more information, refer to the +link:{BaseURL}/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant/2.x_latest/html-single/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant_user_guide/index[Ansible Lightspeed with IBM watsonx Code Assistant User Guide]. +==== + diff --git a/downstream/modules/devtools/proc-rhdh-install-dynamic-plugins-operator.adoc b/downstream/modules/devtools/proc-rhdh-install-dynamic-plugins-operator.adoc new file mode 100644 index 0000000000..1bc929bb15 --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-install-dynamic-plugins-operator.adoc @@ -0,0 +1,80 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-install-dynamic-plugins-operator_{context}"] += Installing the dynamic plug-ins + +To install the dynamic plugins, add them to your ConfigMap for your {RHDHShort} plugin settings (for example, `rhaap-dynamic-plugins-config`). + +If you have not already created a ConfigMap file for your {RHDHShort} plugin settings, +create one by following the procedure in +link:{BaseURL}/red_hat_developer_hub/{RHDHVers}/html/administration_guide_for_red_hat_developer_hub/assembly-add-custom-app-file-openshift_admin-rhdh[Adding a custom application configuration file to Red Hat OpenShift Container Platform] section of the _Administration guide for Red Hat Developer Hub_. 
+ +The example ConfigMap used in the following procedure is called `rhaap-dynamic-plugins-config`. + +.Procedure + +. Select *ConfigMaps* in the navigation pane of the OpenShift console. +. Select the `rhaap-dynamic-plugins-config` ConfigMap from the list. +. Select the *YAML* tab to edit the `rhaap-dynamic-plugins-config` ConfigMap. +. In the `data.dynamic-plugins.yaml.plugins` block, add the three dynamic plug-ins from the plug-in registry. +** For the `integrity` hash values, use the `.integrity` files in your `$DYNAMIC_PLUGIN_ROOT_DIR` directory that correspond to each plug-in, for example use `ansible-plugin-backstage-rhaap-x.y.z.tgz.integrity` for the `ansible-plugin-backstage-rhaap-x.y.z.tgz` plug-in. +** Replace `x.y.z` with the correct version of the plug-ins. ++ +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: rhaap-dynamic-plugins-config +data: + dynamic-plugins.yaml: | + ... + plugins: + - disabled: false + package: 'http://plugin-registry:8080/ansible-plugin-backstage-rhaap-x.y.z.tgz' + integrity: # Use hash in ansible-plugin-backstage-rhaap-x.y.z.tgz.integrity + pluginConfig: + dynamicPlugins: + frontend: + ansible.plugin-backstage-rhaap: + appIcons: + - importName: AnsibleLogo + name: AnsibleLogo + dynamicRoutes: + - importName: AnsiblePage + menuItem: + icon: AnsibleLogo + text: Ansible + path: /ansible + - disabled: false + package: >- + http://plugin-registry:8080/ansible-plugin-backstage-rhaap-backend-x.y.z.tgz + integrity: # Use hash in ansible-plugin-backstage-rhaap-backend-x.y.z.tgz.integrity + pluginConfig: + dynamicPlugins: + backend: + ansible.plugin-backstage-rhaap-backend: null + - disabled: false + package: >- + http://plugin-registry:8080/ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz + integrity: # Use hash in ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz.integrity + pluginConfig: + dynamicPlugins: + backend: + ansible.plugin-scaffolder-backend-module-backstage-rhaap: null + - ... + +---- +. Click btn:[Save]. +. To view the progress of the rolling restart: +.. In the *Topology* view, select the deployment pod and click *View logs*. +.. Select `install-dynamic-plugins` from the list of containers. + +.Verification + +. In the OpenShift console, select the *Topology* view. +. Click the *Open URL* icon on the deployment pod to open your {RHDH} instance in a browser window. + +The Ansible plug-in is present in the navigation pane, and if you select *Administration*, +the installed plug-ins are listed in the *Plugins* tab. + + diff --git a/downstream/modules/devtools/proc-rhdh-operator-add-custom-configmap-cr.adoc b/downstream/modules/devtools/proc-rhdh-operator-add-custom-configmap-cr.adoc new file mode 100644 index 0000000000..ca402f8f80 --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-operator-add-custom-configmap-cr.adoc @@ -0,0 +1,34 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-operator-add-custom-configmap-cr_{context}"] += Adding the rhdh-custom-config file to the {RHDHShort} Operator Custom Resource + +Update the {RHDHShort} Operator Custom Resource to add the `rhdh-custom-config` file. + +. In the OpenShift console, select the *Topology* view. +. Click *More actions {MoreActionsIcon}* on the {RHDHShort} Operator Custom Resource and select *Edit backstage* to edit the Custom Resource. +. Add a `rawRuntimeConfig:` block for your custom ConfigMap `spec:` block. +It must have the same indentation level as the `spec.application:` block. ++ +---- +spec: + application: + ... + database: + ... 
+  rawRuntimeConfig:
+    backstageConfig: rhdh-custom-config
+
+----
+. Click btn:[Save].
++
+The {RHDHShort} Operator redeploys the pods to reflect the updated Custom Resource.
+
+
+// .Verification
+
+// We should be able to see existing config maps that handle the app-config for rhdh instance and a different configMap that would serve the dynamic plugins that are being installed.
+
+// Considering the custom ConfigMaps are named -
+// - app-config-rhdh - Holds baseUrl, template config, plugin-specific config, and RBAC configuration
+// - rhaap-dynamic-plugins-config - contains dynamic plugins to be installed
+
diff --git a/downstream/modules/devtools/proc-rhdh-operator-install-add-plugins-app-config.adoc b/downstream/modules/devtools/proc-rhdh-operator-install-add-plugins-app-config.adoc
new file mode 100644
index 0000000000..d5d8a85495
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-operator-install-add-plugins-app-config.adoc
@@ -0,0 +1,109 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-operator-install-add-plugins-app-config_{context}"]
+= Adding the {AAPRHDHShort} configuration to app-config-rhdh
+
+Add the Ansible plugin-specific configuration to the `app-config-rhdh` ConfigMap,
+and add a location entry for the templates used by the automation content scaffolder plug-in.
+
+
+.Procedure
+
+. In the OpenShift web console, select *ConfigMaps*.
+. Select the `app-config-rhdh` ConfigMap.
+. Select the *YAML* tab to edit the ConfigMap.
+. In the `data.app-config-custom.yaml.catalog` block, add a `locations:` block for the GitHub repository for the templates that are used to scaffold collections and playbooks. The `locations:` block must have the same indentation as the `rules:` block that precedes it.
++
+----
+    locations:
+      - type: url
+        target: https://github.com/ansible/ansible-rhdh-templates/blob/main/all.yaml
+        rules:
+          - allow: [Template]
+----
+. In the `data.app-config-custom.yaml` block, add an `ansible:` block with the same indentation as `catalog:`, to point to your Dev Spaces instance and your {PlatformNameShort} instance.
+Replace the `baseUrl` values with the URLs for your own instances.
++
+----
+    ansible:
+      devSpaces:
+        baseUrl: 'https://devspaces.apps.example-cluster.com/'
+      creatorService:
+        baseUrl: '127.0.0.1'
+        port: '8000'
+      rhaap:
+        baseUrl: 'https://controller.acme.demoredhat.com'
+        token: ...
+        checkSSL: false
+----
++
+After you have added both blocks, the ConfigMap resembles the following:
++
+----
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: app-config-rhdh
+data:
+  app-config-custom.yaml: |
+    app:
+      baseUrl: https://
+    backend:
+      baseUrl: https://
+      cors:
+        origin: https://
+    catalog:
+      rules:
+        - allow: [Component, System, Group, Resource, Location, Template, API, User]
+      locations:
+        - type: url # Add RHDH templates URL
+          target: https://github.com/ansible/ansible-rhdh-templates/blob/main/all.yaml
+          rules:
+            - allow: [Template]
+    ansible: # Add Dev Spaces and AAP URLs
+      devSpaces:
+        baseUrl: 'https://devspaces.apps.example-cluster.com/'
+      creatorService:
+        baseUrl: '127.0.0.1'
+        port: '8000'
+      rhaap:
+        baseUrl: 'https://controller.acme.demoredhat.com'
+        token: ...
+        checkSSL: false
+    auth:
+      environment: development
+      providers:
+        guest:
+          dangerouslyAllowOutsideDevelopment: true
+        github:
+          development:
+            clientId: '...'
+            clientSecret: '...'
+    integrations:
+      github:
+        - host: github.com
+          token: ...
+    enabled:
+      github: true
+    signInPage: github
+    permission:
+      enabled: true
+      rbac:
+        admin:
+          users:
+            - name: ...
+          superUsers:
+            - name: ...
+----
+. Click btn:[Save].
++
+Your {RHDHShort} instance reloads.
+
+.Verification
+
+. Select the *Topology* view in the OpenShift web console to monitor the rolling update.
+. When the update is complete, click the *Open URL* icon on the deployment pod to open your {RHDH} instance in a browser window.
+. Select *Create* in the navigation pane.
+The *Create Ansible Collection Project* and *Create Ansible Playbook Project* software templates are displayed.
+//When you perform a rolling update in OpenShift, the new pods are baked with the updated image or configuration before being deployed.
+
diff --git a/downstream/modules/devtools/proc-rhdh-set-up-controller-project.adoc b/downstream/modules/devtools/proc-rhdh-set-up-controller-project.adoc
new file mode 100644
index 0000000000..7a10700f40
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-set-up-controller-project.adoc
@@ -0,0 +1,26 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-set-up-controller-project_{context}"]
+= Setting up a controller project to run your playbook project
+
+The Ansible plug-ins provide a link to {PlatformNameShort}.
+
+.Procedure
+
+. Log in to your {RHDH} UI.
+. Click the Ansible `A` icon in the {RHDH} navigation panel.
+. Click *Operate* to display a link to your {PlatformNameShort} instance.
++
+If {ControllerName} was not included in your plug-in installation, a link to the product feature page is displayed.
+. Click *Go to {PlatformNameShort}* to open your platform instance in a new browser tab.
++
+Alternatively, if your platform instance was not configured during the Ansible plug-in installation, navigate to your {ControllerName} instance in a browser and log in.
+. Log in to {ControllerName}.
+. Create a project in {PlatformNameShort} for the GitHub repository where you stored your playbook project.
+Refer to the
+link:{URLControllerUserGuide}/controller-projects[Projects]
+chapter of {TitleControllerUserGuide}.
+. Create a job template that uses a playbook from the project that you created.
+Refer to the
+link:{URLControllerUserGuide}/controller-workflow-job-templates[Workflow job templates]
+chapter of {TitleControllerUserGuide}.
+
diff --git a/downstream/modules/devtools/proc-rhdh-uninstall-ocp-helm.adoc b/downstream/modules/devtools/proc-rhdh-uninstall-ocp-helm.adoc
new file mode 100644
index 0000000000..a5e289e506
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-uninstall-ocp-helm.adoc
@@ -0,0 +1,103 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-uninstall-ocp-helm_{context}"]
+= Uninstalling a Helm chart installation
+
+.Procedure
+
+. In {RHDH}, remove any software templates that use the `ansible:content:create` action.
+. In the OpenShift Developer UI, navigate to menu:Helm[developer-hub > Actions > Upgrade > Yaml view].
+. Remove the Ansible plug-ins configuration under the `plugins` section.
++
+----
+...
+global:
+...
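+    # The Ansible plug-ins entries to remove are the three 'plugins' list items
+    # below: the frontend plug-in, the scaffolder backend module, and the backend plug-in.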
+ plugins: + - disabled: false + integrity: + package: 'http://plugin-registry:8080/ansible-plugin-backstage-rhaap-x.y.z.tgz' + pluginConfig: + dynamicPlugins: + frontend: + ansible.plugin-backstage-rhaap: + appIcons: + - importName: AnsibleLogo + name: AnsibleLogo + dynamicRoutes: + - importName: AnsiblePage + menuItem: + icon: AnsibleLogo + text: Ansible + path: /ansible + - disabled: false + integrity: + package: >- + http://plugin-registry:8080/ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz + pluginConfig: + dynamicPlugins: + backend: + ansible.plugin-scaffolder-backend-module-backstage-rhaap: null + - disabled: false + integrity: + package: >- + http://plugin-registry:8080/ansible-plugin-backstage-rhaap-backend-x.y.z.tgz + pluginConfig: + dynamicPlugins: + backend: + ansible.plugin-backstage-rhaap-backend: null +---- +. Remove the `extraContainers` section. ++ +---- +upstream: + backstage: | + ... + extraContainers: + - command: + - adt + - server + image: >- + registry.redhat.io/ansible-automation-platform-25/ansible-dev-tools-rhel8:latest + imagePullPolicy: IfNotPresent + name: ansible-devtools-server + ports: + - containerPort: 8000 + image: + pullPolicy: Always + pullSecrets: + - ... + - rhdh-secret-registry + ... +---- +. Click btn:[Upgrade]. +. Edit your custom {RHDH} config map, for example `app-config-rhdh`. +. Remove the `ansible` section. ++ +---- +data: + app-config-rhdh.yaml: | + ... + ansible: + analytics: + enabled: true + devSpaces: + baseUrl: '' + creatorService: + baseUrl: '127.0.0.1' + port: '8000' + rhaap: + baseUrl: '' + token: '' + checkSSL: true + automationHub: + baseUrl: '' + +---- +. Restart the {RHDH} deployment. +. Remove the `plugin-registry` OpenShift application. ++ +---- +oc delete all -l app=plugin-registry +---- + diff --git a/downstream/modules/devtools/proc-rhdh-uninstall-ocp-operator-plugins-cm.adoc b/downstream/modules/devtools/proc-rhdh-uninstall-ocp-operator-plugins-cm.adoc new file mode 100644 index 0000000000..290a121a2f --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-uninstall-ocp-operator-plugins-cm.adoc @@ -0,0 +1,45 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-uninstall-ocp-operator-plugins-cm_{context}"] += Removing the {AAPRHDHShort} from the ConfigMap + +// (this section covers uninstalling plugins only, not unloading or updating the sidecar container) +// To uninstall the dynamic plugins, you must update the `rhaap-dynamic-plugins-config` ConfigMap + +.Procedure + +. Open the custom ConfigMap where you referenced the {AAPRHDHShort}. +For this example, the ConfigMap name is `rhaap-dynamic-plugins-config`. +. Locate the dynamic plug-ins in the `plugins:` block. ++ +** To disable the plug-ins, update the `disabled` attribute to `true` for the three plug-ins. +** To delete the plug-ins, delete the lines that reference the plug-ins from the `plugins:` block: ++ +---- + +kind: ConfigMap +apiVersion: v1 +metadata: + name: rhaap-dynamic-plugins-config +data: + dynamic-plugins.yaml: | + ... + plugins: # Remove the Ansible plug-ins entries below the ‘plugins’ YAML key + - disabled: false + package: 'http://plugin-registry:8080/ansible-plugin-backstage-rhaap-x.y.z.tgz' + integrity: + ... + - disabled: false + package: >- + http://plugin-registry:8080/ansible-plugin-backstage-rhaap-backend-x.y.z.tgz + integrity: + ... + - disabled: false + package: >- + http://plugin-registry:8080/ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz + integrity: + ... + +---- +. Click btn:[Save]. 
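++
+Optionally, verify the saved ConfigMap from the command line. This is a minimal sketch; add `-n <namespace>` if the ConfigMap is not in your current project:
++
+----
+$ oc get configmap rhaap-dynamic-plugins-config -o yaml
+----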
+
diff --git a/downstream/modules/devtools/proc-rhdh-uninstall-ocp-operator-rhdh-cm.adoc b/downstream/modules/devtools/proc-rhdh-uninstall-ocp-operator-rhdh-cm.adoc
new file mode 100644
index 0000000000..51c5d98d04
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-uninstall-ocp-operator-rhdh-cm.adoc
@@ -0,0 +1,43 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-uninstall-ocp-operator-rhdh-cm_{context}"]
+= Removing {PlatformNameShort} and Dev Spaces from the custom {RHDH} ConfigMap
+
+.Procedure
+
+. Open the custom {RHDH} ConfigMap where you added configuration for the templates and for connecting to {PlatformNameShort} and Dev Spaces.
+In this example, the {RHDH} ConfigMap name is `app-config-rhdh`.
++
+----
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: app-config-rhdh
+data:
+  app-config-custom.yaml: |
+    ...
+    catalog:
+      ...
+      locations: # Remove the YAML entry below the 'locations' YAML key
+        - type: url
+          target: https://github.com/ansible/ansible-rhdh-templates/blob/main/all.yaml
+          rules:
+            - allow: [Template]
+    ...
+    # Remove the entire 'ansible' YAML key and all sub-entries
+    ansible:
+      devSpaces:
+        baseUrl: ''
+      creatorService:
+        baseUrl: '127.0.0.1'
+        port: '8000'
+      rhaap:
+        baseUrl: ''
+        token:
+        checkSSL: false
+
+----
+. Remove the `url` entry in the `locations:` block to delete the templates from the {RHDHShort} instance.
+. Remove the `ansible:` block to delete the Ansible-specific configuration.
+. Click btn:[Save].
+
diff --git a/downstream/modules/devtools/proc-rhdh-uninstall-ocp-operator-sidecar.adoc b/downstream/modules/devtools/proc-rhdh-uninstall-ocp-operator-sidecar.adoc
new file mode 100644
index 0000000000..1269e25027
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-uninstall-ocp-operator-sidecar.adoc
@@ -0,0 +1,22 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-uninstall-ocp-operator-sidecar_{context}"]
+= Removing the Sidecar container from the {RHDHShort} Custom Resource ConfigMap
+
+// Remove sidecar from rhdh-custom-config
+
+// Do this if you need config apart from the Sidecar container to your `rhdh-custom-config` Custom Resource ConfigMap.
+
+If you added extra configuration to the ConfigMap where you added the sidecar container (`rhdh-custom-config` in our example), then you cannot remove the reference to the ConfigMap from the {RHDHShort} Custom Resource.
+
+Instead, you must remove only the YAML code relating to the sidecar container from the ConfigMap: delete the container entry that runs the `adt server` command (named `ansible-devtools-server` in the installation examples), and keep the rest of your configuration in place.
+
+//created a custom resource ConfigMap
+//as described in the xref:rhdh-create-operator-custom-configmap-operator-install_rhdh-install-ocp-operator[Creating a custom Operator ConfigMap]
+//and you added only the {ToolsName} sidecar container to it, then you can remove the reference to the ConfigMap from the {RHDHShort} Custom Resource.
+
diff --git a/downstream/modules/devtools/proc-rhdh-uninstall-ocp-operator.adoc b/downstream/modules/devtools/proc-rhdh-uninstall-ocp-operator.adoc
new file mode 100644
index 0000000000..294de90767
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-uninstall-ocp-operator.adoc
@@ -0,0 +1,80 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-uninstall-ocp-operator_{context}"]
+= Uninstalling an Operator installation
+
+To delete the dynamic plug-ins from your installation, you must edit the ConfigMaps
+that reference Ansible.
+
+The deployment automatically reloads when the ConfigMaps are updated.
+You do not need to reload the deployment manually.
+
+.Procedure
+
+. Open the custom ConfigMap where you referenced the dynamic plug-ins, `rhaap-dynamic-plugins-config`.
+.. Locate the dynamic plug-ins in the `plugins:` block.
++
+*** To disable the dynamic plug-ins, update the `disabled` attribute to `true` for the three dynamic plug-ins.
+*** To delete the dynamic plug-ins, delete the lines that reference the dynamic plug-ins from the `plugins:` block:
++
+----
+      - disabled: false
+        package: 'http://plugin-registry:8080/ansible-plugin-backstage-rhaap-x.y.z.tgz'
+        integrity:
+        ...
+      - disabled: false
+        package: >-
+          http://plugin-registry:8080/ansible-plugin-backstage-rhaap-backend-x.y.z.tgz
+        integrity:
+        ...
+      - disabled: false
+        package: >-
+          http://plugin-registry:8080/ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz
+        integrity:
+----
+.. Click btn:[Save].
+. To completely remove all the Ansible plug-ins, remove the entire list entries that contain the following packages:
++
+----
+http://plugin-registry:8080/ansible-plugin-backstage-rhaap-x.y.z.tgz
+http://plugin-registry:8080/ansible-plugin-backstage-rhaap-backend-x.y.z.tgz
+http://plugin-registry:8080/ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz
+----
+. Open the custom {RHDH} ConfigMap, `app-config-rhdh`.
+.. Remove the `locations:` block to delete the templates from the {RHDHShort} instance.
+.. Remove the `ansible:` block to delete the Ansible-specific configuration.
++
+----
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: app-config-rhdh
+data:
+  app-config-custom.yaml: |
+    app:
+      baseUrl: https://
+    backend:
+      baseUrl: https://
+      cors:
+        origin: https://
+    catalog:
+      rules:
+        - allow: [Component, System, Group, Resource, Location, Template, API, User]
+      locations:
+        - type: url
+          target: https://github.com/ansible/ansible-rhdh-templates/blob/main/all.yaml
+          rules:
+            - allow: [Template]
+    ansible:
+      devSpaces:
+        baseUrl: 'https://devspaces.apps.ansible-rhdh-dev.testing.ansible.com/'
+      creatorService:
+        baseUrl: '127.0.0.1'
+        port: '8000'
+      rhaap:
+        baseUrl: 'https://controller.acme.demoredhat.com'
+        token: ...
+        checkSSL: false
+----
+. Click btn:[Save].
+
diff --git a/downstream/modules/devtools/proc-rhdh-uninstall-ocp-remove-sidecar-cr.adoc b/downstream/modules/devtools/proc-rhdh-uninstall-ocp-remove-sidecar-cr.adoc
new file mode 100644
index 0000000000..fc1abdf258
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-uninstall-ocp-remove-sidecar-cr.adoc
@@ -0,0 +1,38 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-uninstall-ocp-operator-remove-sidecar-cr_{context}"]
+= Removing the custom resource ConfigMap from the {RHDHShort} Operator Custom Resource
+
+// If a custom resource is created to load the sidecar container,
+//we need to be specific about what we remove while we are willing to remove just the sidecar container,
+// because the customResouce acts as the source of truth for the entire RHDH deployment then.
+//
+If you created a custom resource ConfigMap
+as described in the xref:rhdh-create-operator-custom-configmap-operator-install_rhdh-install-ocp-operator[Creating a custom Operator ConfigMap]
+and you added only the {ToolsName} sidecar container to it, then you can remove the reference to the ConfigMap from the {RHDHShort} Custom Resource.
+
+[NOTE]
+====
+Ensure that you do not have any additional need for the custom ConfigMap before you remove it from the Custom Resource.
+====
+
+.Procedure
+
+. In the OpenShift console, select the *Topology* view.
+. Click *More actions {MoreActionsIcon}* on the {RHDHShort} Operator Custom Resource and select *Edit backstage* to edit the Custom Resource.
+. Remove the ConfigMap reference from the {RHDHShort} Operator Custom Resource.
+For this example, the ConfigMap name is `rhdh-custom-config`.
++
+----
+...
+spec:
+  application:
+  ...
+  database:
+  ...
+  rawRuntimeConfig: # Remove the 'backstageConfig' YAML key below
+    backstageConfig: rhdh-custom-config
+
+----
+. Click btn:[Save].
+
diff --git a/downstream/modules/devtools/proc-rhdh-update-plugin-registry.adoc b/downstream/modules/devtools/proc-rhdh-update-plugin-registry.adoc
new file mode 100644
index 0000000000..67bff58b48
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-update-plugin-registry.adoc
@@ -0,0 +1,50 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-update-plugin-registry_{context}"]
+= Updating the plug-in registry
+
+Rebuild your plug-in registry application in your OpenShift cluster with the latest Ansible plug-ins files.
+
+.Prerequisites
+
+* You have downloaded the Ansible plug-ins files.
+* You have set an environment variable, for example `$DYNAMIC_PLUGIN_ROOT_DIR`,
+to represent the path to the local directory where you have stored the `.tar` files.
+
+.Procedure
+
+. Log in to your {OCPShort} instance with credentials to create a new application.
+. Open your {RHDH} OpenShift project.
++
+----
+$ oc project <project_name>
+----
+. Run the following command to update your plug-in registry build in the OpenShift cluster.
+The command assumes that `$DYNAMIC_PLUGIN_ROOT_DIR` represents the directory for your `.tar` files.
+Replace this in the command if you have chosen a different environment variable name.
++
+----
+$ oc start-build plugin-registry --from-dir=$DYNAMIC_PLUGIN_ROOT_DIR --wait
+----
+. When the build has started, the output displays the following message:
++
+----
+Uploading directory "/path/to/dynamic_plugin_root" as binary input for the build …
+Uploading finished
+build.build.openshift.io/plugin-registry-1 started
+----
+
+.Verification
+
+Verify that the `plugin-registry` has been updated.
+
+. In the OpenShift UI, click *Topology*.
+. Click the *redhat-developer-hub* icon to view the pods for the plug-in registry.
+. Click *View logs* for the plug-in registry pod.
+. Open the *Terminal* tab and run `ls` to view the `.tar` files in the plug-in registry.
+. Verify that the new `.tar` file has been uploaded.
+
diff --git a/downstream/modules/devtools/proc-rhdh-update-plugins-helm-version-numbers.adoc b/downstream/modules/devtools/proc-rhdh-update-plugins-helm-version-numbers.adoc
new file mode 100644
index 0000000000..d458fa3975
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-update-plugins-helm-version-numbers.adoc
@@ -0,0 +1,59 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-update-plugins-helm-version-numbers_{context}"]
+= Updating the Ansible plug-ins version numbers for a Helm installation
+
+.Procedure
+
+. Log in to your {OCPShort} instance.
+. In the OpenShift Developer UI, navigate to menu:Helm[developer-hub > Actions > Upgrade > Yaml view].
+. Update the Ansible plug-ins version numbers and associated `.integrity` file values.
++
+----
+...
+global:
+...
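+    # Update x.y.z in each 'package' URL below, and refresh the matching
+    # 'integrity' value from the corresponding .integrity file at the same time.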
+ plugins: + - disabled: false + integrity: + package: 'http://plugin-registry:8080/ansible-plugin-backstage-rhaap-x.y.z.tgz' + pluginConfig: + dynamicPlugins: + frontend: + ansible.plugin-backstage-rhaap: + appIcons: + - importName: AnsibleLogo + name: AnsibleLogo + dynamicRoutes: + - importName: AnsiblePage + menuItem: + icon: AnsibleLogo + text: Ansible + path: /ansible + - disabled: false + integrity: + package: >- + http://plugin-registry:8080/ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz + pluginConfig: + dynamicPlugins: + backend: + ansible.plugin-scaffolder-backend-module-backstage-rhaap: null + - disabled: false + integrity: + package: >- + http://plugin-registry:8080/ansible-plugin-backstage-rhaap-backend-x.y.z.tgz + pluginConfig: + dynamicPlugins: + backend: + ansible.plugin-backstage-rhaap-backend: null + +---- +. Click btn:[Upgrade]. ++ +The developer hub pods restart and the plug-ins are installed. + +.Verification + +. In the OpenShift UI, click *Topology*. +. Make sure that the {RHDH} instance is available. + diff --git a/downstream/modules/devtools/proc-rhdh-update-plugins-operator-version-numbers.adoc b/downstream/modules/devtools/proc-rhdh-update-plugins-operator-version-numbers.adoc new file mode 100644 index 0000000000..e0608f6ef2 --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-update-plugins-operator-version-numbers.adoc @@ -0,0 +1,48 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-update-plugins-operator-version-numbers_{context}"] += Updating the Ansible plug-ins version numbers for an Operator installation + +.Procedure + +. Log in to your {OCPShort} instance. +. In the OpenShift UI, open the ConfigMap where you added the {AAPRHDHShort} during installation. +This example uses a ConfigMap file called `rhaap-dynamic-plugins-config`. +. Update `x.y.z` with the version numbers for the updated {AAPRHDHShort}. +. Update the integrity values for each plug-in with the `.integrity` value from the corresponding extracted {AAPRHDHShort} `.tar` file. +// For example, use the `.integrity` value from `ansible-plugin-backstage-rhaap-x.y.z.tgz` for the `ansible-plugin-backstage-rhaap-x.y.z.tgz.integrity` key. ++ +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: rhaap-dynamic-plugins-config +data: + dynamic-plugins.yaml: | + ... + plugins: # Update the Ansible plug-in entries below with the updated plugin versions + - disabled: false + package: 'http://plugin-registry:8080/ansible-plugin-backstage-rhaap-x.y.z.tgz' + integrity: # Use hash in ansible-plugin-backstage-rhaap-x.y.z.tgz.integrity + ... + - disabled: false + package: >- + http://plugin-registry:8080/ansible-plugin-backstage-rhaap-backend-x.y.z.tgz + integrity: # Use hash in ansible-plugin-backstage-rhaap-backend-x.y.z.tgz.integrity + ... + - disabled: false + package: >- + http://plugin-registry:8080/ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz + integrity: # Use hash in ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz.integrity + ... + +---- +. Click btn:[Save]. ++ +The developer hub pods restart and the plug-ins are installed. + +.Verification + +. In the OpenShift UI, click *Topology*. +. Make sure that the {RHDH} instance is available. 
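++
+If the instance does not become available, you can inspect the plug-in installation logs from the command line. This is an optional sketch; the deployment name `developer-hub` is an assumption, so substitute the name of your {RHDHShort} deployment:
++
+----
+$ oc logs deployment/developer-hub -c install-dynamic-plugins
+----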
+ diff --git a/downstream/modules/devtools/proc-rhdh-view.adoc b/downstream/modules/devtools/proc-rhdh-view.adoc new file mode 100644 index 0000000000..0c78e60f73 --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-view.adoc @@ -0,0 +1,7 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-view_{context}"] += Viewing your projects + +To view the projects that you have created in the plug-in, navigate to the *Overview* page for the Ansible plug-in and click *My Items*. + diff --git a/downstream/modules/devtools/proc-rhdh-warning-aap-ooc.adoc b/downstream/modules/devtools/proc-rhdh-warning-aap-ooc.adoc new file mode 100644 index 0000000000..13f2c52be1 --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-warning-aap-ooc.adoc @@ -0,0 +1,22 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-warning-aap-ooc_{context}"] += {PlatformNameShort} subscription is out of compliance + +The following warning indicates that the Ansible plug-ins successfully retrieved the {PlatformNameShort} subscription status. +However, the subscription is out of compliance. + +---- +Subscription non-compliant +The connected Ansible Automation Platform subscription is out of compliance. +Contact your Red Hat account team to obtain a new subscription entitlement. +Learn more about account compliance. +---- + +[discrete] +== Remediation steps + +. Contact your Red Hat account team to obtain a new subscription entitlement. +. Learn more about link:https://access.redhat.com/solutions/6988859[account compliance]. +. When the subscription is in compliance, restart the {RHDH} pod to initiate a new subscription query. + diff --git a/downstream/modules/devtools/proc-rhdh-warning-invalid-aap-config.adoc b/downstream/modules/devtools/proc-rhdh-warning-invalid-aap-config.adoc new file mode 100644 index 0000000000..6f42e6dbde --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-warning-invalid-aap-config.adoc @@ -0,0 +1,20 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-warning-invalid-aap-config_{context}"] += Invalid Ansible Automation Platform configuration + +The following warning indicates that the {PlatformNameShort} configuration section is invalid or incomplete. + +---- +Invalid resource for Ansible Automation Platform +Verify that the resource url for Ansible Automation Platform are correctly configured in the Ansible plug-ins. +For help, please refer to the Ansible plug-ins installation guide. +---- + +[discrete] +== Remediation steps + +. Verify that the `rhaap` section of the Ansible plug-ins ConfigMap is correctly configured and contains all the necessary entries. +For more information, refer to xref:rhdh-configure-aap-details_rhdh-ocp-required-installation[Configuring Ansible Automation Platform details]. +. After correcting the configuration, restart the {RHDH} pod to initiate a subscription query. + diff --git a/downstream/modules/devtools/proc-rhdh-warning-invalid-aap-subscription.adoc b/downstream/modules/devtools/proc-rhdh-warning-invalid-aap-subscription.adoc new file mode 100644 index 0000000000..68e5c0a2fa --- /dev/null +++ b/downstream/modules/devtools/proc-rhdh-warning-invalid-aap-subscription.adoc @@ -0,0 +1,20 @@ +:_mod-docs-content-type: PROCEDURE + +[id="rhdh-warning-invalid-aap-subscription_{context}"] += Invalid {PlatformNameShort} subscription + +The following warning indicates that the Ansible plug-ins successfully retrieved the {PlatformNameShort} subscription status. +However, the subscription type is invalid for {PlatformNameShort}. 
+
+----
+Invalid subscription
+The connected Ansible Automation Platform subscription is invalid.
+Contact your Red Hat account team, or start an Ansible Automation Platform trial.
+----
+
+[discrete]
+== Remediation steps
+
+. Contact your Red Hat account team to obtain a new subscription entitlement or link:http://red.ht/aap-rhdh-plugins-start-trial[start an {PlatformNameShort} trial].
+. When you have updated the subscription, restart the {RHDH} pod to initiate a new subscription query.
+
diff --git a/downstream/modules/devtools/proc-rhdh-warning-unable-authenticate-aap.adoc b/downstream/modules/devtools/proc-rhdh-warning-unable-authenticate-aap.adoc
new file mode 100644
index 0000000000..3a41fa2bf4
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-warning-unable-authenticate-aap.adoc
@@ -0,0 +1,22 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-warning-unable-authenticate-aap_{context}"]
+= Unable to authenticate to Ansible Automation Platform
+
+The following warning indicates that the Ansible plug-ins were not able to authenticate with {PlatformNameShort} to query the subscription status.
+
+----
+Unable to authenticate to Ansible Automation Platform
+Verify that the authentication details for Ansible Automation Platform are correctly configured in the Ansible plug-ins.
+For help, please refer to the Ansible plug-ins installation guide.
+----
+
+[discrete]
+== Remediation steps
+
+. Verify that the automation controller Personal Access Token (PAT) configured in the Ansible plug-ins is correct.
+For more information, refer to the
+link:{URLCentralAuth}/gw-token-based-authentication#proc-controller-apps-create-tokens[Adding tokens]
+section of {TitleCentralAuth}.
+. After correcting the authentication details, restart the {RHDH} pod to initiate a subscription query.
+
diff --git a/downstream/modules/devtools/proc-rhdh-warning-unable-connect-aap.adoc b/downstream/modules/devtools/proc-rhdh-warning-unable-connect-aap.adoc
new file mode 100644
index 0000000000..9d84c7c8bd
--- /dev/null
+++ b/downstream/modules/devtools/proc-rhdh-warning-unable-connect-aap.adoc
@@ -0,0 +1,20 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="rhdh-warning-unable-connect-aap_{context}"]
+= Unable to connect to Ansible Automation Platform
+
+The following warning indicates that the automation controller details are not configured, or that the controller instance API is unreachable, so the subscription status cannot be queried.
+
+----
+Unable to connect to Ansible Automation Platform
+Verify that Ansible Automation Platform is reachable and correctly configured in the Ansible plug-ins.
+To get help, please refer to the Ansible plug-ins installation guide.
+----
+
+[discrete]
+== Remediation steps
+
+. Verify that {PlatformNameShort} is reachable and correctly configured in the `rhaap` section of the ConfigMap.
+. Ensure that the `checkSSL` key is correctly set for your environment.
+. After correcting the configuration details, restart the {RHDH} pod to initiate a subscription query.
+
diff --git a/downstream/modules/devtools/proc-running-playbook.adoc b/downstream/modules/devtools/proc-running-playbook.adoc
deleted file mode 100644
index 303b180a20..0000000000
--- a/downstream/modules/devtools/proc-running-playbook.adoc
+++ /dev/null
@@ -1,36 +0,0 @@
-[id="running-playbook"]
-
-= Running your playbook
-
-[role="_abstract"]
-
-The Ansible {VSCode} extension provides two options to run your playbook:
-
-* `ansible-playbook` runs the playbook on your local machine using Ansible Core.
-* `ansible-navigator` runs the playbook in an execution environment in the same manner that {PlatformNameShort} runs an automation job. -You specify the base image for the execution environment in the Ansible extension settings. - -== Running your playbook with `ansible-playbook` - -.Procedure - -* To run a playbook, right-click the playbook name in the Explorer pane, then select menu:Run Ansible Playbook via[Run playbook via `ansible-playbook`]. - -image:ansible-playbook-run.png[Run playbook via ansible-playbook] - -The output is displayed in the *Terminal* tab of the {VSCode} terminal. -The `ok=2` and `failed=0` messages indicate that the playbook ran successfully. - -image:ansible-playbook-success.png[Success message for ansible-playbook execution] - -== Running your playbook with `ansible-navigator` - -.Prerequisites - -* In the Ansible extension settings, enable the use of an execution environment in Ansible Execution Environment > Enabled. -* Enter the path or URL for the execution environment image in Ansible > Execution Environment: Image. - -.Procedure - -* To run a playbook, right-click the playbook name in the Explorer pane, then select menu:Run Ansible Playbook via[Run playbook via ansible-navigator run]. - diff --git a/downstream/modules/devtools/proc-scaffolding-playbook-project.adoc b/downstream/modules/devtools/proc-scaffolding-playbook-project.adoc index 57e07294fb..3d164e3094 100644 --- a/downstream/modules/devtools/proc-scaffolding-playbook-project.adoc +++ b/downstream/modules/devtools/proc-scaffolding-playbook-project.adoc @@ -1,4 +1,4 @@ -[id="scaffolding-playbook-project"] +[id="scaffolding-playbook-project_{context}"] = Scaffolding a playbook project @@ -15,19 +15,32 @@ The following steps describe the process for scaffolding a new playbook project .Procedure +. Open {VSCode}. . Click the Ansible icon in the {VSCode} activity bar to open the Ansible extension. -. Type kbd:[Ctrl+Shift+P] to display the VSCode command palette. -. In the input field, enter `Create new Ansible project`. The **Create Ansible Project** tab opens. -. Enter a name for the directory where you want to scaffold your new playbook project. +. Select *Get started* in the *Ansible content creator* section. ++ +The *Ansible content creator* tab opens. +. In the *Create* section, click *Ansible playbook project*. ++ +The *Create Ansible project* tab opens. +. In the form in the *Create Ansible project* tab, enter the following: ++ +* *Destination directory*: Enter the path to the directory where you want to scaffold your new playbook project. + [NOTE] ==== -If you enter a current directory name, the scaffolding process will overwrite the contents of that directory. +If you enter an existing directory name, the scaffolding process overwrites the contents of that directory. +The scaffold process only allows you to use an existing directory if you enable the `Force` option. ==== -. Add an organization name and a project name. -. Click btn:[Create] to begin creating your project. +** If you are using the containerized version of Ansible Dev tools, the destination directory path is relative to the container, not a path in your local system. To discover the current directory name in the container, run the `pwd` command in a terminal in VS Code. If the current directory in the container is `workspaces`, enter `workspaces/`. +** If you are using a locally installed version of Ansible Dev tools, enter the full path to the directory, for example `/user//projects/`. 
+* *SCM organization and SCM project*: Enter a name for the directory and subdirectory where you can store roles that you create for your playbooks. +. Enter a name for the directory where you want to scaffold your new playbook project. -After the project directory has been created, the following message appears in the Logs pane of the Create Ansible Project tab: +.Verification + +After the project directory has been created, the following message appears in the *Logs* pane of the *Create Ansible Project* tab. +In this example, the destination directory name is `destination_directory_name`. ---- ------------------ ansible-creator logs ------------------ @@ -37,17 +50,34 @@ After the project directory has been created, the following message appears in t The following directories and files are created in your project directory: ---- +$ tree -a -L 5 . +├── .devcontainer +│   ├── devcontainer.json +│   ├── docker +│   │   └── devcontainer.json +│   └── podman +│   └── devcontainer.json +├── .gitignore ├── README.md ├── ansible-navigator.yml ├── ansible.cfg ├── collections -│ ├── ansible_collections -│ └── requirements.yml +│   ├── ansible_collections +│   │   └── scm_organization_name +│   │   └── scm_project_name +│   └── requirements.yml ├── devfile.yaml ├── inventory -│ ├── group_vars -│ ├── host_vars -│ └── hosts.yml +│   ├── group_vars +│   │   ├── all.yml +│   │   └── web_servers.yml +│   ├── host_vars +│   │   ├── server1.yml +│   │   ├── server2.yml +│   │   ├── server3.yml +│   │   ├── switch1.yml +│   │   └── switch2.yml +│   └── hosts.yml ├── linux_playbook.yml ├── network_playbook.yml └── site.yml diff --git a/downstream/modules/devtools/proc-setup-vscode-workspace.adoc b/downstream/modules/devtools/proc-setup-vscode-workspace.adoc index ddd87b26be..9f238252ed 100644 --- a/downstream/modules/devtools/proc-setup-vscode-workspace.adoc +++ b/downstream/modules/devtools/proc-setup-vscode-workspace.adoc @@ -1,4 +1,4 @@ -[id="setup-vscode-workspace"] +[id="setup-vscode-workspace_{context}"] = Setting up a {VSCode} workspace diff --git a/downstream/modules/devtools/proc-writing-playbook.adoc b/downstream/modules/devtools/proc-writing-playbook.adoc deleted file mode 100644 index 1374505b45..0000000000 --- a/downstream/modules/devtools/proc-writing-playbook.adoc +++ /dev/null @@ -1,34 +0,0 @@ -[id="writing-playbook"] - -= Writing your first playbook - -[role="_abstract"] -The instructions below describe how {ToolsName} help you to create and run your first playbook in {VSCode}. - -.Prerequisites - -.You have installed and opened the Ansible {VSCode} extension. -.You have installed `ansible-devtools`. -.You have set up and activated a Python virtual environment in {VSCode}. -.You have opened a terminal in {VSCode}. - -.Procedure - -. Open a YAML file in {VSCode} for your playbook. -You can create a new file or use the empty placeholder YAML file that you set up when you created the directory and playbook. -. Add the following example code into the playbook file and save the file. -The playbook consists of a single play that executes a ping to your local machine. -+ ----- -- name: My first play - hosts: localhost - tasks: - - name: Ping my hosts - ansible.builtin.ping: - ----- -+ -`Ansible-lint` runs in the background and displays errors in the *Problems* tab of the terminal. 
-There are no errors in this playbook: - -image::ansible-lint-no-errors.png[Ansible-lint showing no errors in a playbook] diff --git a/downstream/modules/devtools/ref-devtools-components.adoc b/downstream/modules/devtools/ref-devtools-components.adoc index a086e4f822..2de31c17e5 100644 --- a/downstream/modules/devtools/ref-devtools-components.adoc +++ b/downstream/modules/devtools/ref-devtools-components.adoc @@ -1,26 +1,28 @@ -[id="devtools-components_context"] +[id="devtools-components_{context}"] = {ToolsName} components [role="_abstract"] -You can access most {ToolsName} from the Ansible {VSCode} extension, and others from the command line. +You can operate some {ToolsName} from the {VSCode} UI when you have installed the Ansible extension, +and the remainder from the command line. +{VSCode} is a free open-source code editor available on Linux, Mac, and Windows. -* Ansible {VSCode} extension: -This is not packaged with the {PlatformNameShort} RPM package, but it is an integral part of the automation creation process. -From the Ansible {VSCode} extension, you can use the {ToolsName} for the following tasks: +Ansible {VSCode} extension:: +This is not packaged with the {PlatformNameShort} RPM package, but it is an integral part of the automation creation workflow. +From the {VSCode} UI, you can use the {ToolsName} for the following tasks: + -- ** Scaffold directories for a playbook project or a collection. ** Write playbooks with the help of syntax highlighting and auto-completion. ** Debug your playbooks with a linter. -** Execute playbooks with Ansible Core with `ansible-playbook`. +** Execute playbooks with Ansible Core using `ansible-playbook`. ** Execute playbooks in an execution environment with `ansible-navigator`. -- + From the {VSCode} extension, you can also connect to {LightspeedFullName}. -* Command-line {ToolsName}: you can perform the following tasks with {ToolsName} from the command line, +Command-line {ToolsName}:: You can perform the following tasks with {ToolsName} from the command line, including the terminal in {VSCode}: ** Create an execution environment. ** Test your playbooks, roles, modules, plugins and collections. diff --git a/downstream/modules/devtools/ref-devtools-workflow.adoc b/downstream/modules/devtools/ref-devtools-workflow.adoc index 5e1e51c94d..fd84607110 100644 --- a/downstream/modules/devtools/ref-devtools-workflow.adoc +++ b/downstream/modules/devtools/ref-devtools-workflow.adoc @@ -1,21 +1,28 @@ -[id="devtools-workflow_context"] +[id="devtools-workflow_{context}"] = Workflow [role="_abstract"] -In the build stage, you create a new playbook project within a virtual environment, using {VSCode}. The following is a typical workflow: +== Create + +In the create stage, you create a new playbook project locally, using {VSCode}. The following is a typical workflow: . Install and run the Ansible extension in {VSCode}. -. Create or open a workspace for your playbooks directory in {VSCode}. -. Create and activate a Python virtual environment for your workspace and select it in {VSCode}. . Scaffold a playbook project from {VSCode}. -. Add the collection names that your playbook uses to the requirements file. -// . Use ansible-dev-environment to create a virtual environment for your project. This installs any dependencies from the requirements file. -. Edit your playbook. Ansible-lint suggests corrections. -. Add roles in the roles directory. -. Create an execution environment that reflects the environment that {PlatformNameShort} uses. -. 
Run your playbooks from the Ansible extension. -// . As you develop your playbooks and roles, you can incorporate new dependencies into your virtual environment by re-running ansible-dev-environment. -// . Use `molecule` to test your playbooks. Create one scenario for every playbook in your project. +. Add playbook files to your project and edit them in {VSCode}. + +== Test + +. Debug your playbook with the help of `ansible-lint`. +. Select or create an {ExecEnvNameSing} so that your local environment replicates the environment on {PlatformNameShort}. +. Run your playbooks from {VSCode}, using `ansible-playbook` or using `ansible-navigator` with an {ExecEnvShort}. +. Test your playbooks by running them on an {ExecEnvShort} that replicates your production environment. + +== Deploy + +. Push your playbooks project to a source control repository. +. Set up credentials on {PlatformNameShort} to pull from your source control repository and create a project for your playbook repository. +. If you have created an {ExecEnvShort}, push it to {PrivateHubName}. +. Create a job template on {PlatformNameShort} that runs a playbook from your project, and specify the {ExecEnvShort} that you want to use. diff --git a/downstream/modules/devtools/ref-rhdh-about-plugins.adoc b/downstream/modules/devtools/ref-rhdh-about-plugins.adoc new file mode 100644 index 0000000000..123313e0c3 --- /dev/null +++ b/downstream/modules/devtools/ref-rhdh-about-plugins.adoc @@ -0,0 +1,15 @@ +:_mod-docs-content-type: REFERENCE + +[id="rhdh-about-plugins_{context}"] += {AAPRHDH} + +{AAPRHDH} deliver an Ansible-first {RHDH} user experience that simplifies the automation experience for Ansible users of all skill levels. +The Ansible plug-ins provide curated content and features to accelerate Ansible learner onboarding and streamline Ansible use case adoption across your organization. + +The Ansible plug-ins provide: + +* A customized home page and navigation tailored to Ansible users. +* Curated Ansible learning paths to help users new to Ansible. +* Software templates for creating Ansible playbook and collection projects that follow best practices. +* Links to supported development environments and tools with opinionated configurations. + diff --git a/downstream/modules/devtools/ref-rhdh-about-rhdh.adoc b/downstream/modules/devtools/ref-rhdh-about-rhdh.adoc new file mode 100644 index 0000000000..21bac74849 --- /dev/null +++ b/downstream/modules/devtools/ref-rhdh-about-rhdh.adoc @@ -0,0 +1,6 @@ +:_mod-docs-content-type: REFERENCE + +[id="rhdh-about-rhdh_{context}"] += Red Hat Developer Hub + +{RHDH} (RHDH) serves as an open developer platform designed for building developer portals. 
diff --git a/downstream/modules/devtools/ref-rhdh-architecture.adoc b/downstream/modules/devtools/ref-rhdh-architecture.adoc new file mode 100644 index 0000000000..b8dc539e88 --- /dev/null +++ b/downstream/modules/devtools/ref-rhdh-architecture.adoc @@ -0,0 +1,7 @@ +:_mod-docs-content-type: REFERENCE + +[id="rhdh-architecture_{context}"] += Architecture + +image::rhdh-ansible-plugin-architecture.png[Ansible plugin for Red Hat Developer Hub architecture] + diff --git a/downstream/modules/devtools/ref-rhdh-dashboard.adoc b/downstream/modules/devtools/ref-rhdh-dashboard.adoc new file mode 100644 index 0000000000..a08bd4c941 --- /dev/null +++ b/downstream/modules/devtools/ref-rhdh-dashboard.adoc @@ -0,0 +1,32 @@ +:_mod-docs-content-type: REFERENCE + +[id="rhdh-dashboard_{context}"] += Dashboard navigation + +When you log in to {RHDH} (RHDH), the main RHDH menu and dashboard are displayed. + +To view the dashboard for {AAPRHDH}, click *Ansible* in the {RHDH} navigation panel. + +image::rhdh-plugin-dashboard.png[Ansible plug-in dashboard] + +The plug-in dashboard illustrates the steps you need to take from learning about Ansible to deploying automation jobs from {PlatformNameShort}: + +* *Overview* displays the main dashboard page. +* *Learn* provides links to resources curated by Red Hat that introduce you to Ansible and provide step-by-step examples to get you started. +For more information, see +xref:rhdh-learning_rhdh-using[Learning about Ansible]. +* *Discover existing collections* links to {PrivateHubName}, if configured in the plug-ins, or to {HubName} hosted on the Red Hat Hybrid Cloud Console. +{HubNameStart} stores existing collections and execution environments that you can use in your projects. +For more information, see +xref:rhdh-discover-collections_rhdh-using[Discovering existing collections]. +* *Create* creates new projects in your configured Source Control Management platforms such as GitHub. +For more information, see +xref:rhdh-create_rhdh-using[Creating a project]. +* *Develop* links you to OpenShift Dev Spaces, if configured in the Ansible plug-ins installation. +OpenShift Dev Spaces provides on-demand, web-based Integrated Development Environments (IDEs), where you can develop automation content. +For more information, see +xref:rhdh-develop-projects_rhdh-using[Developing projects]. +* *Operate* connects you to {PlatformNameShort}, where you can create and run automation jobs that use the projects you have developed. +For more information, see +xref:rhdh-set-up-controller-project_rhdh-using[Setting up a controller project to run your playbook project]. + diff --git a/downstream/modules/devtools/ref-rhdh-discover-collections.adoc b/downstream/modules/devtools/ref-rhdh-discover-collections.adoc new file mode 100644 index 0000000000..07f9d797cc --- /dev/null +++ b/downstream/modules/devtools/ref-rhdh-discover-collections.adoc @@ -0,0 +1,14 @@ +:_mod-docs-content-type: REFERENCE + +[id="rhdh-discover-collections_{context}"] += Discovering existing collections + +From the *Overview* page in the Ansible plug-ins dashboard on {RHDH}, click *Discover Existing Collections*. + +The links in this pane provide access to the source of reusable automation content collections that you configured during plug-in installation. + +If you configured {PrivateHubName} when installing the plug-in, you can click *Go to Automation Hub* to view the collections and {ExecEnvShort}s that your enterprise has curated. 
+ +If you did not configure a {PrivateHubName} URL when installing the plug-in, the *Discover existing collection* pane provides a link to Red Hat {HubName} on console.redhat.com. +You can explore certified and validated Ansible content collections on this site. + diff --git a/downstream/modules/devtools/ref-rhdh-full-aap-configmap-example.adoc b/downstream/modules/devtools/ref-rhdh-full-aap-configmap-example.adoc new file mode 100644 index 0000000000..ded01270e5 --- /dev/null +++ b/downstream/modules/devtools/ref-rhdh-full-aap-configmap-example.adoc @@ -0,0 +1,38 @@ +:_mod-docs-content-type: REFERENCE + +[id="rhdh-full-aap-configmap-example_{context}"] += Full app-config-rhdh ConfigMap example for Ansible plug-ins entries + +---- +kind: ConfigMap +... +metadata: + name: app-config-rhdh + ... +data: + app-config-rhdh.yaml: |- + ansible: + creatorService: + baseUrl: 127.0.0.1 + port: '8000' + rhaap: + baseUrl: '' + token: '' + checkSSL: + # Optional integrations + devSpaces: + baseUrl: '' + automationHub: + baseUrl: '' + + ... + catalog: + locations: + - type: url + target: https://github.com/ansible/ansible-rhdh-templates/blob/main/all.yaml + rules: + - allow: [Template] + ... + +---- + diff --git a/downstream/modules/devtools/ref-rhdh-full-helm-chart-ansible-plugins.adoc b/downstream/modules/devtools/ref-rhdh-full-helm-chart-ansible-plugins.adoc new file mode 100644 index 0000000000..f0082d14fa --- /dev/null +++ b/downstream/modules/devtools/ref-rhdh-full-helm-chart-ansible-plugins.adoc @@ -0,0 +1,65 @@ +:_mod-docs-content-type: REFERENCE + +[id="rhdh-full-helm-chart-ansible-plugins_{context}"] += Full Helm chart config example for Ansible plug-ins + +---- +global: + ... + dynamic: + ... + plugins: + - disabled: false + integrity: + package: 'http://plugin-registry:8080/ansible-plugin-backstage-rhaap-x.y.z.tgz' + pluginConfig: + dynamicPlugins: + frontend: + ansible.plugin-backstage-rhaap: + appIcons: + - importName: AnsibleLogo + name: AnsibleLogo + dynamicRoutes: + - importName: AnsiblePage + menuItem: + icon: AnsibleLogo + text: Ansible + path: /ansible + - disabled: false + integrity: + package: >- + http://plugin-registry:8080/ansible-plugin-scaffolder-backend-module-backstage-rhaap-x.y.z.tgz + pluginConfig: + dynamicPlugins: + backend: + ansible.plugin-scaffolder-backend-module-backstage-rhaap: null + - disabled: false + integrity: + package: >- + http://plugin-registry:8080/ansible-plugin-backstage-rhaap-backend-x.y.z.tgz + pluginConfig: + dynamicPlugins: + backend: + ansible.plugin-backstage-rhaap-backend: null +... +upstream: + backstage: + ... + extraAppConfig: + - configMapRef: app-config-rhdh + filename: app-config-rhdh.yaml + extraContainers: + - command: + - adt + - server + image: >- + registry.redhat.io/ansible-automation-platform-25/ansible-dev-tools-rhel8:latest + imagePullPolicy: IfNotPresent + name: ansible-devtools-server + ports: + - containerPort: 8000 +... + + +---- + diff --git a/downstream/modules/devtools/ref-rhdh-learning.adoc b/downstream/modules/devtools/ref-rhdh-learning.adoc new file mode 100644 index 0000000000..293fcaf7cf --- /dev/null +++ b/downstream/modules/devtools/ref-rhdh-learning.adoc @@ -0,0 +1,13 @@ +:_mod-docs-content-type: REFERENCE + +[id="rhdh-learning_{context}"] += Learning about Ansible + +To learn more about getting started with automation, click *Learn* from the *Overview* page of the plug-in dashboard. 
+The *Learn* page provides the following options for learning:
+
+* *Learning Paths* lists a curated selection of learning tools hosted on developers.redhat.com that guide you through the foundations of working with Ansible, the Ansible {VSCode} extension, and using YAML.
++
+You can select other Ansible learning paths from the *Useful links* section.
+* *Labs* are self-led labs that are designed to give you hands-on experience in writing Ansible content and using {ToolsName}.
+
diff --git a/downstream/modules/devtools/snippets b/downstream/modules/devtools/snippets
new file mode 120000
index 0000000000..7bf6da9a51
--- /dev/null
+++ b/downstream/modules/devtools/snippets
@@ -0,0 +1 @@
+../../snippets
\ No newline at end of file
diff --git a/downstream/modules/eda/con-characterizing-your-workload.adoc b/downstream/modules/eda/con-characterizing-your-workload.adoc
new file mode 100644
index 0000000000..cd064c636a
--- /dev/null
+++ b/downstream/modules/eda/con-characterizing-your-workload.adoc
@@ -0,0 +1,12 @@
+[id="characterizing-your-workload"]
+
+= Characterizing your workload
+
+[role="_abstract"]
+In {EDAcontroller}, your workload includes the number of rulebook activations and the number of events being received. Consider the following factors to characterize your {EDAcontroller} workload:
+
+. Number of simultaneous rulebook activations
+. Number of events received by {EDAcontroller}
+
+include::con-modifying-simultaneous-activations.adoc[leveloffset=+1]
+include::con-modifying-memory-limit.adoc[leveloffset=+1]
diff --git a/downstream/modules/eda/con-credential-types-list-view.adoc b/downstream/modules/eda/con-credential-types-list-view.adoc
new file mode 100644
index 0000000000..9d1c192825
--- /dev/null
+++ b/downstream/modules/eda/con-credential-types-list-view.adoc
@@ -0,0 +1,16 @@
+[id="eda-credentials-list-view"]
+
+= Credentials list view
+
+When you log in to {PlatformNameShort} and select {MenuADCredentials}, the Credentials page has a pre-loaded *Decision Environment Container Registry* credential. When you create your own credentials, they are added to this list view.
+
+From the menu bar, you can search for credentials in the *Name* search field.
+
+You also have the following options in the menu bar:
+
+* Choose how fields are shown in the list view by clicking the btn:[Manage columns] icon. You have four options in which you can arrange your fields:
+** *Column* - Shows the column in the table.
+** *Description* - Shows the column when the item is expanded as a full width description.
+** *Expanded* - Shows the column when the item is expanded as a detail.
+** *Hidden* - Hides the column.
+* Choose between a btn:[List view] or a btn:[Card view], by clicking the icons.
diff --git a/downstream/modules/eda/con-credentials-list-view.adoc b/downstream/modules/eda/con-credentials-list-view.adoc
index d56aec530b..9d1c192825 100644
--- a/downstream/modules/eda/con-credentials-list-view.adoc
+++ b/downstream/modules/eda/con-credentials-list-view.adoc
@@ -2,11 +2,15 @@
 
 = Credentials list view
 
-On the *Credentials* page, you can view the list of created credentials that you have created along with the *Type* of credential.
+When you log in to {PlatformNameShort} and select {MenuADCredentials}, the Credentials page has a pre-loaded *Decision Environment Container Registry* credential. When you create your own credentials, they are added to this list view.
 
-From the menu bar, you can search for credentials in the *Name* field.
+From the menu bar, you can search for credentials in the *Name* search field.
 
 You also have the following options in the menu bar:
 
-* Choose which columns are shown in the list view by clicking btn:[Manage columns].
+* Choose how fields are shown in the list view by clicking the btn:[Manage columns] icon. You have four options in which you can arrange your fields:
+** *Column* - Shows the column in the table.
+** *Description* - Shows the column when the item is expanded as a full width description.
+** *Expanded* - Shows the column when the item is expanded as a detail.
+** *Hidden* - Hides the column.
 * Choose between a btn:[List view] or a btn:[Card view], by clicking the icons.
diff --git a/downstream/modules/eda/con-custom-credential-types.adoc b/downstream/modules/eda/con-custom-credential-types.adoc
new file mode 100644
index 0000000000..d3db52d9b9
--- /dev/null
+++ b/downstream/modules/eda/con-custom-credential-types.adoc
@@ -0,0 +1,71 @@
+[id="eda-custom-credential-types"]
+
+= Custom credential types
+
+As a system administrator, you can define a custom credential type that works in ways similar to existing credential types, in a standard format using a YAML or JSON-like definition.
+
+Each credential type displays its own unique configurations in the Input Configuration field and the Injector Configuration field, if applicable. Custom credentials support Ansible extra variables as a means of injecting their authentication information.
+
+You can attach one or more cloud, vault, and {PlatformName} credentials to a rulebook activation.
+
+[NOTE]
+====
+* When creating a new credential type, you must avoid collisions in the `extra_vars`.
+* Extra variable names must not start with *EDA_* because they are reserved.
+* You must have System administrator (superuser) permissions to be able to create and edit a credential type and to be able to view the *Injector configuration* field.
+====
+
+When you customize your own credential types, they are displayed on the Credential Types page along with the list of built-in credential types.
+Both YAML and JSON formats are supported in the configuration fields.
+//Note from J. Self: REVIEWERS, please confirm the Note above along with the paragraph about attaching one SSH and multiple clouds to a job template. I copied this from automation controller content, but not entirely sure it's relevant to EDA.
+
+[discrete]
+== Input Configuration
+
+The Input configuration has two attributes:
+
+* fields - a collection of properties for a credential type.
+* required - a list of required fields.
+
+Fields can have multiple properties, depending on the credential type you select.
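+
+For example, the following is a minimal sketch of an input configuration that uses the attributes described in the table that follows; the `api_token` and `hostname` field names are hypothetical:
+
+----
+fields:
+  - id: api_token
+    type: string
+    label: API Token
+    secret: true
+  - id: hostname
+    type: string
+    label: Hostname
+required:
+  - api_token
+----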
+
+.Input Configuration Field Properties
+[cols="a,a,a"]
+|===
+| Fields | Description | Mandatory (Y/N)
+
+h| id | Unique ID of the field; must be a string type and stores the variable name | Yes
+
+h| type | Can be string or boolean type | No, default is string
+
+h| label | Used by the UI when rendering the UI element | Yes
+
+h| secret | Will be encrypted | No, default false
+
+h| multiline | If the field contains data from a file, multiline can be set to true | No, default false
+
+h| help_text | The help text associated with this field | No
+
+|===
+
+[discrete]
+== Injector Configuration
+
+You can use Injector configuration to extract information from Input configuration fields and map them into injector types that can be sent to ansible-rulebook when running a rulebook activation. {EDAName} supports the following types of injectors:
+
+* Environment variables (`env`) - Used in source plugins for the underlying package or shared library.
+* Ansible extra variables (`extra_vars`) - Used for substitution in the rulebook conditions, actions, or source plugin parameters.
+* File-based templating (`file`) - Used to create file contents from the credential inputs, such as certificates and keys, which might be required by source plugins. File injectors provide a way to deliver these certificates and keys to ansible-rulebook at runtime without having to store them in decision environments. As a result, ansible-rulebook creates temporary files, and the file names can be accessed using `eda.filename` variables, which are automatically created for you after the files have been created (for instance, "{{eda.filename.my_cert}}").
+
+[IMPORTANT]
+====
+When creating `extra_vars` in rulebook activations and credential type injectors, avoid using `eda` or `ansible` as key names because they conflict with internal usage and might cause failures in both rulebook activations and credential type creation.
+====
+
+Injectors enable you to adjust the fields so that they can be injected into a rulebook as one of the above-mentioned injector types, which cannot have duplicate keys at the top level. If you have two sources in a rulebook that both require parameters such as username and password, the injectors, along with the rulebook, help you adapt the arguments for each source.
+
+To view a sample injector and input, see the following GitHub gists, respectively:
+
+* link:https://gist.github.com/mkanoor/f080648917377da870bb002d4563294d[credential injectors]
+* link:https://gist.github.com/mkanoor/04c32b20addb7898af299a9254a46e61#file-gssapi-input-credential-type[gssapi input credential type]
\ No newline at end of file
diff --git a/downstream/modules/eda/con-eda-author-event-filters.adoc b/downstream/modules/eda/con-eda-author-event-filters.adoc
new file mode 100644
index 0000000000..ec38ff555b
--- /dev/null
+++ b/downstream/modules/eda/con-eda-author-event-filters.adoc
@@ -0,0 +1,33 @@
+[id="eda-author-event-filters"]
+
+= Author event filters
+
+Event filters are functions in a Python module that perform transformations on the event data.
+They can remove, add, change, or move any data in the event data structure.
+Event filters take the event as the first argument; additional keyword arguments are provided by the configuration in the rulebook.
+
+The basic structure is as follows:
+
+----
+ # my_namespace.my_collection/extensions/eda/plugins/event_filter/my_filter.py
+ def main(event: dict, arg1, arg2):
+     # Process event data here
+     return event
+----
+
+You can use this filter in a rulebook by adding it to the filters list in an event source:
+
+----
+ sources:
+   - name: azure_service_bus
+     ansible.eda.azure_service_bus:
+       conn_str: "{{connection_str}}"
+       queue_name: "{{queue_name}}"
+     filters:
+       - my_namespace.my_collection.my_filter:
+           arg1: hello
+           arg2: world
+----
+
+.Additional resources
+See the event filter plugins in the link:https://github.com/ansible/event-driven-ansible/tree/main/extensions/eda/plugins/event_filter[ansible.eda collection] for more examples of how to author them.
diff --git a/downstream/modules/eda/con-eda-projects-list-view.adoc b/downstream/modules/eda/con-eda-projects-list-view.adoc
index 64887ae029..bb6c6ac36b 100644
--- a/downstream/modules/eda/con-eda-projects-list-view.adoc
+++ b/downstream/modules/eda/con-eda-projects-list-view.adoc
@@ -6,7 +6,7 @@ On the *Projects* page, you can view the projects that you have created along wi
 [NOTE]
 ====
-If a rulebook changes in source control you can re-sync a project by selecting the sync icon next to the project from the *Projects* list view.
+If a rulebook changes in source control, you can re-sync a project by selecting the sync icon next to the project from the *Projects* list view.
 The *Git hash* updates represent the latest commit on that repository. An activation must be restarted or recreated if you want to use the updated project.
 ====
diff --git a/downstream/modules/eda/con-eda-rulebook-activation-list-view.adoc b/downstream/modules/eda/con-eda-rulebook-activation-list-view.adoc
index c395e506e4..60011424e0 100644
--- a/downstream/modules/eda/con-eda-rulebook-activation-list-view.adoc
+++ b/downstream/modules/eda/con-eda-rulebook-activation-list-view.adoc
@@ -2,12 +2,13 @@
 = Rulebook activation list view
-On the *Rulebook Activations* page, you can view the rulebook activations that you have created along with the *Activation status*, *Number of rules associated* with the rulebook, the *Fire count*, and *Restart count*.
+On the *Rulebook Activations* page, you can view the rulebook activations that you have created along with the *Status*, the *Number of rules* associated with the rulebook, the *Fire count*, and the *Restart count*.
-If the *Activation Status* is *Running*, it means that the rulebook activation is running in the background and executing the required actions according to the rules declared in the rulebook.
+If the *Status* is *Running*, it means that the rulebook activation is running in the background and executing the required actions according to the rules declared in the rulebook.
 You can view more details by selecting the activation from the *Rulebook Activations* list view.
+//Replace this screen shot with current view
 image::eda-rulebook-activations-list-view.png[Rulebook activation][width=25px]
 For all activations that have run, you can view the *Details* and *History* tabs to get more information about what happened.
diff --git a/downstream/modules/eda/con-event-streams.adoc b/downstream/modules/eda/con-event-streams.adoc
new file mode 100644
index 0000000000..20fa6f0e39
--- /dev/null
+++ b/downstream/modules/eda/con-event-streams.adoc
@@ -0,0 +1,39 @@
+
+[id="event-streams"]
+
+= Event streams
+
+[role="_abstract"]
+Event streams can send events from remote systems to {EDAcontroller}.
In a typical setup, a server sends data to an event stream over the internet to an {EDAName} event stream receiver. When the data comes over the internet, the request must be authenticated. Depending on the webhook vendor or remote system, the authentication method can differ.
+
+{EDAcontroller} supports six different event stream types.
+
+.Event Stream Types
+[cols="a,a,a"]
+|===
+| Type | Description | Vendors
+
+h| HMAC | Hashed Message Authentication Code (HMAC). Uses a shared secret between {EDAcontroller} and the vendor's webhook server. This guarantees message integrity. | GitHub
+
+h| Basic Authentication | Uses HTTP basic authentication. | Datadog, Dynatrace
+
+h| Token Authentication | Uses token authentication. The HTTP header is usually *Authorization*, but some vendors, such as GitLab, use *X-Gitlab-Token*. | GitLab, ServiceNow
+
+h| OAuth2 | Uses Machine-to-Machine (M2M) mode with a grant type called *client credentials*. The token is opaque. | Dynatrace
+
+h| OAuth2 with JWT | Uses M2M mode with a grant type called *client credentials*. The token is a JSON Web Token (JWT). | Datadog
+
+h| ECDSA | Uses the Elliptic Curve Digital Signature Algorithm. | SendGrid, Twilio
+
+//[Jameria] Not currently supported; will leave commented out for now in the event that it is supported in the near future. h| Mutual TLS | Needs the vendor's CA certificate to be present in our servers at startup. This supports non-repudiation.
+// | PagerDuty
+|===
+
+{EDAcontroller} also supports four other specialized event streams that are based on the six basic event stream types:
+
+* GitLab Event Stream
+* GitHub Event Stream
+* ServiceNow Event Stream
+* Dynatrace Event Stream
+
+These specialized types limit the parameters you use by adding default values. For example, the GitHub Event Stream is a specialization of the HMAC Event Stream with many of the fields already populated. After the GitHub Event Stream credential has been saved, the recommended defaults for the GitHub Event Stream are displayed.
\ No newline at end of file
diff --git a/downstream/modules/eda/con-modifying-memory-limit.adoc b/downstream/modules/eda/con-modifying-memory-limit.adoc
new file mode 100644
index 0000000000..bcce7b419e
--- /dev/null
+++ b/downstream/modules/eda/con-modifying-memory-limit.adoc
@@ -0,0 +1,19 @@
+[id="modifying-memory-limit"]
+
+= Modifying the default memory limit for each rulebook activation
+
+[role="_abstract"]
+Memory usage is based on the number of events that {EDAcontroller} has to process.
+Each rulebook activation container has a 200MB memory limit.
+For example, with 4 CPUs and 16GB of RAM, one rulebook activation container with an assigned 200MB memory limit cannot handle more than 150,000 events per minute.
+If the number of rulebook activations running in parallel is higher, the maximum number of events each rulebook activation can process is reduced.
+If there are too many incoming events at a very high rate, the container can run out of memory trying to process the events.
+This kills the container, and your rulebook activation fails with a status code of 137.
+
+To address this failure, you can increase the amount of memory allocated to rulebook activations to process a high number of events at a high rate by using one of the following procedures:
+
+* Modifying the default memory limit for each rulebook activation during installation
+* Modifying the default memory limit for each rulebook activation after installation
+
+include::proc-modifying-memory-during-install.adoc[leveloffset=+1]
+include::proc-modifying-memory-after-install.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/downstream/modules/eda/con-modifying-simultaneous-activations.adoc b/downstream/modules/eda/con-modifying-simultaneous-activations.adoc
new file mode 100644
index 0000000000..9801204b37
--- /dev/null
+++ b/downstream/modules/eda/con-modifying-simultaneous-activations.adoc
@@ -0,0 +1,23 @@
+[id="modifying-simultaneous-activations"]
+
+= Modifying the number of simultaneous rulebook activations
+
+[role="_abstract"]
+By default, {EDAcontroller} allows 12 rulebook activations per node. For example, with two worker or hybrid nodes, this results in a limit of 24 activations in total that can run simultaneously.
+If more than 24 rulebook activations are created, subsequent rulebook activations wait until a rulebook activation worker is available.
+In this case, the rulebook activation status is displayed as *Pending*, even if there is enough free memory and CPU on your {EDAcontroller} instance.
+To change this behavior, you must change the default maximum number of running rulebook activations.
+
+[NOTE]
+====
+* The value for `MAX_RUNNING_ACTIVATIONS` does not change when you modify the instance size, so it must be adjusted manually.
+* If you are installing {EDAName} on {OCPShort}, the limit of 12 rulebook activations per node is a global value, because there is no concept of worker nodes when installing {EDAName} on {OCPShort}. For more information, see link:{URLOperatorInstallation}/operator-install-planning#modifying_the_number_of_simultaneous_rulebook_activations_during_or_after_event_driven_ansible_controller_installation[Modifying the number of simultaneous rulebook activations during or after {EDAcontroller} installation] in link:{LinkOperatorInstallation}.
+====
+
+include::proc-modifying-activations-during-install.adoc[leveloffset=+1]
+include::proc-modifying-activations-after-install.adoc[leveloffset=+1]
+
+.Additional Resources
+* For more information about rulebook activations, see link:https://access.redhat.com/documentation/en-us/red_hat_ansible_automation_platform/2.4/html-single/event-driven_ansible_controller_user_guide/index#eda-rulebook-activations[Rulebook activations].
+* For more information about modifying the number of simultaneous rulebook activations during or after installation of {EDAName} on {OCPShort}, see link:{URLOperatorInstallation}/appendix-operator-crs_performance-considerations#eda_max_running_activations[EDA_MAX_RUNNING_ACTIVATIONS].
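+
+For illustration only, the following is a minimal sketch of how this limit might be raised on an {OCPShort} deployment through the operator custom resource. The CR kind, the `extra_settings` mechanism, and the value shown are assumptions made for this sketch; verify the exact field names against the EDA_MAX_RUNNING_ACTIVATIONS reference linked above before using it.
+
+----
+# Hypothetical EDA custom resource snippet; field names are illustrative only.
+apiVersion: eda.ansible.com/v1alpha1
+kind: EDA
+metadata:
+  name: my-eda
+spec:
+  extra_settings:
+    - setting: EDA_MAX_RUNNING_ACTIVATIONS
+      value: "16"
+----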
+ diff --git a/downstream/modules/eda/con-replacing-controller-tokens.adoc b/downstream/modules/eda/con-replacing-controller-tokens.adoc new file mode 100644 index 0000000000..a97bbd5862 --- /dev/null +++ b/downstream/modules/eda/con-replacing-controller-tokens.adoc @@ -0,0 +1,6 @@ +[id="replacing-controller-tokens"] + += Replacing controller tokens in {PlatformName} {PlatformVers} + + +To use {EDAcontroller} in {PlatformName} {PlatformVers}, you must replace legacy controller tokens configured in your environment with {PlatformName} credentials because controller tokens have been deprecated. \ No newline at end of file diff --git a/downstream/modules/eda/con-system-level-monitoring.adoc b/downstream/modules/eda/con-system-level-monitoring.adoc new file mode 100644 index 0000000000..38d3e7b718 --- /dev/null +++ b/downstream/modules/eda/con-system-level-monitoring.adoc @@ -0,0 +1,18 @@ +[id="system-level-monitoring"] + += System level monitoring for {EDAcontroller} + +[role="_abstract"] +After characterizing your workload to determine how many rulebook activations you are running in parallel and how many events you are receiving at any given point, you must consider monitoring your {EDAcontroller} host at the system level. +Using system level monitoring to review information about {EDAName}’s performance over time helps when diagnosing problems or when considering capacity for future growth. + +System level monitoring includes the following information: + +* Disk I/O +* RAM utilization +* CPU utilization +* Network traffic + +Higher CPU, RAM, or Disk utilization can affect the overall performance of {EDAcontroller}. +For example, a high utilization of any of these system level resources indicates that either the {EDAcontroller} is running too many rulebook activations, or some of the individual rulebook activations are using a high volume of resources. +In this case, you must increase your system level resources to support your workload. diff --git a/downstream/modules/eda/proc-eda-activate-webhook.adoc b/downstream/modules/eda/proc-eda-activate-webhook.adoc index 30c282a800..8150d6b9ff 100644 --- a/downstream/modules/eda/proc-eda-activate-webhook.adoc +++ b/downstream/modules/eda/proc-eda-activate-webhook.adoc @@ -6,7 +6,7 @@ In Openshift environments, you can allow webhooks to reach an activation-job-pod .Prerequisites -* You have created a rulebook activation in the {EDAcontroller} Dashboard. +* You have created a rulebook activation. [NOTE] ==== @@ -71,4 +71,4 @@ test-sync-bug-dynatrace.apps.aap-dt.ocp4.testing.ansible.com -d [NOTE] ==== You do not need the port as it is specified on the Route (targetPort). -==== \ No newline at end of file +==== diff --git a/downstream/modules/eda/proc-eda-activation-keeps-restarting.adoc b/downstream/modules/eda/proc-eda-activation-keeps-restarting.adoc new file mode 100644 index 0000000000..cd7b049a58 --- /dev/null +++ b/downstream/modules/eda/proc-eda-activation-keeps-restarting.adoc @@ -0,0 +1,23 @@ +[id="eda-activation-keeps-restarting"] + += Activation keeps restarting + +Perform the following steps if your rulebook activation keeps restarting. + +.Procedure +. Log in to {PlatformNameShort}. +. From the navigation panel, select {MenuADRulebookActivations}. +. From the *Rulebook Activations* page, select the activation in your list that keeps restarting. The Details page is displayed. +. Click the *History* tab for more information and select the rulebook activation that keeps restarting. 
The Details tab is displayed and shows the output information.
+. Check the *Restart policy* field for your activation.
++
+There are three selections available: *On failure* (restarts a rulebook activation when the container process fails), *Always* (always restarts, regardless of success or failure, with no more than 5 restarts), or *Never* (never restarts when the container process ends).
++
+.. Confirm whether your rulebook activation *Restart policy* is set to *On failure*. If it is, the repeated restarts indicate that an issue is causing the activation to fail.
+.. To diagnose the problem, check the YAML code and the instance logs of the rulebook activation for errors.
+.. If you cannot find a solution with the restart policy values, proceed to the next steps related to the *Log level*.
+. Check the log level for your activation.
+.. If your default log level is *Error*, go back to the *Rulebook Activations* page and recreate your activation following the procedures in link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-rulebook-activations#eda-set-up-rulebook-activation[Setting up a rulebook activation].
+.. Change the *Log level* to *Debug*.
+.. Run the activation again and navigate to the *History* tab from the activation details page.
+.. On the *History* page, click one of your recent activations and view the *Output*.
diff --git a/downstream/modules/eda/proc-eda-activation-stuck-pending.adoc b/downstream/modules/eda/proc-eda-activation-stuck-pending.adoc
new file mode 100644
index 0000000000..da923c9b30
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-activation-stuck-pending.adoc
@@ -0,0 +1,25 @@
+[id="eda-activation-stuck-pending"]
+
+= Activation stuck in Pending state
+
+Perform the following steps if your rulebook activation is stuck in the *Pending* state.
+
+.Procedure
+
+. Confirm whether there are other running activations and whether you have reached the limits (for example, memory or CPU limits).
+.. If there are other activations running, terminate one or more of them, if possible.
+.. If not, check that the default worker, Redis, and activation worker are all running. If all systems are working as expected, check your eda-server internal logs in the worker, scheduler, API, and nginx containers and services to see if the problem can be determined.
++
+[NOTE]
+====
+These logs reveal the source of the issue, such as an exception thrown by the code, a runtime error with network issues, or an error with the rulebook code. If your internal logs do not provide information that leads to resolution, report the issue to Red Hat support.
+====
+
+.. If you need to make adjustments, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-performance-tuning#modifying-simultaneous-activations[Modifying the number of simultaneous rulebook activations].
++
+[NOTE]
+====
+To adjust the maximum number of simultaneous activations for {OperatorPlatformNameShort} on {OCPShort} deployments, see link:{URLOperatorInstallation}/operator-install-planning#modifying_the_number_of_simultaneous_rulebook_activations_during_or_after_event_driven_ansible_controller_installation[Modifying the number of simultaneous rulebook activations during or after {EDAcontroller} installation] in link:{LinkOperatorInstallation}.
+====
+
+
diff --git a/downstream/modules/eda/proc-eda-build-a-custom-decision-environment.adoc b/downstream/modules/eda/proc-eda-build-a-custom-decision-environment.adoc
index 828d56594f..a0c8130d36 100644
--- a/downstream/modules/eda/proc-eda-build-a-custom-decision-environment.adoc
+++ b/downstream/modules/eda/proc-eda-build-a-custom-decision-environment.adoc
@@ -1,22 +1,31 @@
 [id="eda-build-a-custom-decision-environment"]
-= Building a custom decision environment for {EDAName} within {PlatformNameShort}
+= Building a custom decision environment for {EDAName}
-Use the following instructions if you need a custom decision environment to provide a custom maintained or third-party event source plugin that is not available in the default decision environment.
+Decision environments are {ExecEnvShort}s tailored to running Ansible rulebooks.
+
+Similar to {ExecEnvShort}s that run Ansible playbooks for {ControllerName}, decision environments are designed to run rulebooks for {EDAcontroller}.
+
+You can create a custom decision environment for {EDAName} that provides a custom-maintained or third-party event source plugin that is not available in the default decision environment.
 .Prerequisites
-* {PlatformNameShort} > = 2.4
+* {PlatformNameShort} >= 2.5
 * {EDAName}
 * {Builder} > = 3.0
 .Procedure
-* Add the `de-supported` decision environment. This image is built from a base image provided by Red Hat called `de-minimal`.
+* Use `de-minimal` as the base image with {Builder} to build your custom decision environments.
+Red Hat provides this base image, and the link:https://catalog.redhat.com/software/containers/ansible-automation-platform-25/de-supported-rhel9/650a5674ad524b664b693729[{PlatformNameShort} supported decision environment] is also built from it.
+
+
-[NOTE]
-====
-Red Hat recommends using `de-minimal` as the base image with {Builder} to build your custom decision environments.
+[IMPORTANT]
+====
+* Use the correct {EDAcontroller} decision environment in {PlatformNameShort} to prevent rulebook activation failure.
+
+** If you want to connect {EDAcontroller} to {PlatformNameShort} 2.4, you must use `registry.redhat.io/ansible-automation-platform-24/de-supported-rhel8:latest`.
+** If you want to connect {EDAcontroller} to {PlatformNameShort} {PlatformVers}, you must use `registry.redhat.io/ansible-automation-platform-25/de-supported-rhel8:latest`.
 ====
 The following is an example of the {Builder} definition file that uses `de-minimal` as a base image to build a custom decision environment with the ansible.eda collection:
@@ -25,7 +34,7 @@
 version: 3
 images:
   base_image:
-    name: 'registry.redhat.io/ansible-automation-platform-24/de-minimal-rhel8:latest'
+    name: 'registry.redhat.io/ansible-automation-platform-25/de-minimal-rhel8:latest'
 dependencies:
   galaxy:
@@ -44,7 +53,7 @@
 version: 3
 images:
   base_image:
-    name: 'registry.redhat.io/ansible-automation-platform-24/de-minimal-rhel8:latest'
+    name: 'registry.redhat.io/ansible-automation-platform-25/de-minimal-rhel8:latest'
 dependencies:
   galaxy:
diff --git a/downstream/modules/eda/proc-eda-cannot-connect-to-controller.adoc b/downstream/modules/eda/proc-eda-cannot-connect-to-controller.adoc
new file mode 100644
index 0000000000..9376f32fbb
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-cannot-connect-to-controller.adoc
@@ -0,0 +1,12 @@
+[id="eda-cannot-connect-to-controller"]
+
+= Cannot connect to the 2.5 {ControllerName} when running activations
+
+You might experience a failed connection to {ControllerName} when you run your activations.
+
+.Procedure
+. To help resolve the issue, confirm that you have set up a {PlatformName} credential and have obtained the correct {ControllerName} URL.
+.. If you have not set up a {PlatformName} credential, follow the procedures in link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-set-up-rhaap-credential-type#eda-set-up-rhaap-credential[Setting up a {PlatformName} credential]. Ensure that this credential has the host set to the following URL format: https:///api/controller
+
+.. When you have completed this process, try setting up your rulebook activation again.
+
diff --git a/downstream/modules/eda/proc-eda-check-rule-audit-event-stream.adoc b/downstream/modules/eda/proc-eda-check-rule-audit-event-stream.adoc
new file mode 100644
index 0000000000..7868e5e11a
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-check-rule-audit-event-stream.adoc
@@ -0,0 +1,13 @@
+[id="eda-check-rule-audit-event-stream"]
+
+= Check the Rule Audit for events on your new event stream
+
+When events have been sent and received by {EDAcontroller}, you can confirm that actions have been triggered by going to the Rule Audit page and viewing the event stream results.
+
+.Procedure
+. Log in to {PlatformNameShort}.
+. From the navigation panel, select {MenuADRuleAudit}.
++
+If your rulebook activation received the event data from the event stream type you selected, the Rule Audit page displays results similar to the following image.
++
+image:eda-rule-audit-event-streams.png[Rule audit - Event stream]
\ No newline at end of file
diff --git a/downstream/modules/eda/proc-eda-config-remote-sys-to-events.adoc b/downstream/modules/eda/proc-eda-config-remote-sys-to-events.adoc
new file mode 100644
index 0000000000..d9820c9936
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-config-remote-sys-to-events.adoc
@@ -0,0 +1,32 @@
+[id="eda-config-remote-sys-to-events"]
+
+= Configuring your remote system to send events
+
+After you have created your event stream, you must configure your remote system to send events to {EDAcontroller}. The method used for this configuration varies depending on the vendor for the event stream credential type you select.
+
+.Prerequisites
+
+* The URL that was generated when you created your event stream
+* Secrets or passwords that you set up in your event stream credential
+
+.Procedure
+
+The following example demonstrates how to configure webhooks in a remote system, such as GitHub, to send events to {EDAcontroller}. Each vendor has its own method for configuring your remote system to send events to {EDAcontroller}.
+
+. Log in to your GitHub repository.
+. Click *Your profile name → Your repositories*.
+
+[NOTE]
+====
+If you do not have a repository, click *New* to create a new one, select an owner, add a *Repository name*, and click *Create repository*.
+====
+
+. Navigate to *Settings* (toolbar).
+. In the *General* navigation pane, select *Webhooks*.
+. Click *Add webhook*.
+. In the *Payload URL* field, paste the URL you saved when you created your event stream.
+. Select *application/json* in the *Content type* list.
+. Enter your *Secret*.
+. Click *Add webhook*.
+
+After the webhook has been added, it attempts to send a test payload to ensure that there is connectivity between the two systems (GitHub and {EDAcontroller}). If the data is sent successfully, a green check mark is displayed next to the *Webhook URL* with the message *Last delivery was successful*.
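+
+To see these events drive automation end to end, the rulebook attached to your activation needs a source that receives the forwarded payloads. The following is a minimal sketch, assuming a webhook-style source and an illustrative condition on the GitHub push payload; the ruleset name, port, and condition are examples only, so adapt them to your own rulebook:
+
+----
+# Illustrative rulebook sketch; names and values are examples only.
+- name: Respond to GitHub pushes
+  hosts: all
+  sources:
+    # This source can later be replaced by an event stream attached to the activation.
+    - ansible.eda.webhook:
+        host: 0.0.0.0
+        port: 5000
+  rules:
+    - name: Log the pushed repository
+      condition: event.payload.repository.full_name is defined
+      action:
+        debug:
+          msg: "Received an event for {{ event.payload.repository.full_name }}"
+----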
\ No newline at end of file
diff --git a/downstream/modules/eda/proc-eda-create-event-stream-credential.adoc b/downstream/modules/eda/proc-eda-create-event-stream-credential.adoc
new file mode 100644
index 0000000000..837841d008
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-create-event-stream-credential.adoc
@@ -0,0 +1,32 @@
+[id="eda-create-event-stream-credential"]
+
+= Creating an event stream credential
+
+You must create an event stream credential before you can use an event stream.
+
+.Prerequisites
+
+* Each event stream must have exactly one credential.
+
+.Procedure
+
+. Log in to the {PlatformNameShort} Dashboard.
+. From the navigation panel, select {MenuADCredentials}.
+. Click btn:[Create credential].
+. Insert the following:
++
+Name:: Insert the name.
+Description:: This field is optional.
+Organization:: Click the list to select an organization or select *Default*.
+Credential type:: Click the list to select your credential type.
++
+[NOTE]
+====
+When you select the credential type, the *Type Details* section is displayed with fields that are applicable to the credential type you selected.
+====
+
+Type Details:: Add the requested information for the credential type you selected. For example, if you selected the GitHub Event Stream credential type, you are required to add an HMAC Secret (symmetrical shared secret) between {EDAcontroller} and the remote server.
+
+. Click btn:[Create credential].
+
+The Details page is displayed. From there or the *Credentials* list view, you can edit or delete it.
diff --git a/downstream/modules/eda/proc-eda-create-event-stream.adoc b/downstream/modules/eda/proc-eda-create-event-stream.adoc
new file mode 100644
index 0000000000..580e5822e6
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-create-event-stream.adoc
@@ -0,0 +1,48 @@
+[id="eda-create-event-stream"]
+
+= Creating an event stream
+
+You can create event streams to attach to a rulebook activation.
+
+.Prerequisites
+
+* If you are attaching your event stream to a rulebook activation, ensure that your activation has a decision environment and project already set up.
+* If you plan to connect to {ControllerName} to run your rulebook activation, ensure that you have created a {PlatformName} credential type in addition to the decision environment and project. For more information, see xref:eda-set-up-rhaap-credential[Setting up a {PlatformName} credential].
+
+.Procedure
+
+. Log in to {PlatformNameShort}.
+. From the navigation panel, select {MenuADEventStreams}.
+. Click btn:[Create event stream].
+. Insert the following:
++
+Name:: Insert the name.
+Organization:: Click the list to select an organization or select *Default*.
+Event stream type:: Select the event stream type you prefer.
++
+[NOTE]
+====
+This list displays at least 10 default event stream types that can be used to authenticate the connection coming from your remote server.
+====
+Credentials:: Select a credential from the list, preferably the one you created for your event stream.
+Headers:: Enter HTTP header keys, separated by commas, that you want to include in the event payload. To include all headers, leave the field empty.
+
+Forward events to rulebook activation:: Use this option to enable or disable the capability of forwarding events to rulebook activations.
++
+[NOTE]
+====
+The event stream's event forwarding can be disabled for testing purposes while diagnosing connections and evaluating the incoming data.
Disabling the *Forward events to rulebook activation* option allows you to test the event stream connection with the remote system, analyze the header and payload, and, if necessary, diagnose credential issues. This ensures that events are not forwarded to rulebook activations, which would inadvertently trigger rules and conditions while you are in test mode. Some enterprises might have policies to change secrets and passwords at a regular cadence. You can enable or disable this option at any time after the event stream is created.
+====
+
+. Click btn:[Create event stream].
+
+After creating your event stream, the following outputs occur:
+
+* The Details page is displayed. From there or the Event Streams list view, you can edit or delete it. Also, the Event Streams page shows all of the event streams you have created and the following columns for each event: *Events received*, *Last event received*, and *Event stream type*. As the first two columns receive external data through the event stream, they are continuously updated to let you know they are receiving events from remote systems.
+* If you disabled the event stream, the Details page is displayed with a warning message, *This event stream is disabled*.
+* Your new event stream generates a URL that is necessary when you configure the webhook on the remote system that sends events.
+
+[NOTE]
+====
+After an event stream is created, the associated credential cannot be deleted until the event stream it is attached to is deleted.
+====
diff --git a/downstream/modules/eda/proc-eda-delete-controller-token.adoc b/downstream/modules/eda/proc-eda-delete-controller-token.adoc
new file mode 100644
index 0000000000..eb9d9895b4
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-delete-controller-token.adoc
@@ -0,0 +1,18 @@
+[id="eda-delete-controller-token"]
+
+= Deleting controller tokens
+
+Before you can set up {PlatformName} credentials, you must delete any existing controller tokens.
+
+.Prerequisites
+* You have deleted all rulebook activations that use controller tokens.
+
+.Procedure
+
+. Log in to the {PlatformNameShort} Dashboard.
+. From the top navigation panel, select your profile.
+. Click *User details*.
+. Select the *Tokens* tab.
+. Delete all of your previous controller tokens.
+
+After deleting the controller tokens and rulebook activations, proceed with xref:eda-set-up-rhaap-credential[Setting up a {PlatformName} credential].
diff --git a/downstream/modules/eda/proc-eda-delete-credential.adoc b/downstream/modules/eda/proc-eda-delete-credential.adoc
index da26398250..6f3c791780 100644
--- a/downstream/modules/eda/proc-eda-delete-credential.adoc
+++ b/downstream/modules/eda/proc-eda-delete-credential.adoc
@@ -2,12 +2,19 @@
 = Deleting a credential
+You can delete credentials if they are no longer needed for your organization.
+
 .Procedure
 . Delete the credential by using one of these methods:
 * From the *Credentials* list view, click the btn:[More Actions] icon *{MoreActionsIcon}* next to the desired credential and click btn:[Delete credential].
 * From the *Credentials* list view, select the name of the credential, click the btn:[More Actions] icon *{MoreActionsIcon}* next to btn:[Edit credential], and click btn:[Delete credential].
 . In the pop-up window, select *Yes, I confirm that I want to delete this credential*.
++
+[NOTE]
+====
+If your credential is still in use by other resources in your organization, a warning message is displayed letting you know that the credential cannot be deleted.
Also, if your credential is being used in an event stream, you cannot delete it until the event stream is deleted or attached to a different credential. In general, avoid deleting a credential that is in use because it can lead to broken activations. +==== . Click btn:[Delete credential]. You can delete multiple credentials at a time by selecting the checkbox next to each credential and clicking the btn:[More Actions] icon *{MoreActionsIcon}* in the menu bar and then clicking btn:[Delete selected credentials]. diff --git a/downstream/modules/eda/proc-eda-delete-project.adoc b/downstream/modules/eda/proc-eda-delete-project.adoc index 3bf025b256..ef0a7ba5d5 100644 --- a/downstream/modules/eda/proc-eda-delete-project.adoc +++ b/downstream/modules/eda/proc-eda-delete-project.adoc @@ -2,8 +2,12 @@ = Deleting a project +If you need to delete a project, the {EDAcontroller} interface provides multiple options. + .Procedure -. From the *Projects* list view, select the btn:[More Actions] icon *{MoreActionsIcon}* next to the desired project. +. To delete a project, complete one of the following: +* From the *Projects* list view, select the checkbox next to the desired project, and click the btn:[More Actions] icon *{MoreActionsIcon}* from the page menu. +* From the *Projects* list view, click the btn:[More Actions] icon *{MoreActionsIcon}* next to the desired project. . Select btn:[Delete project]. -. In the popup window, select btn:[Yes, I confirm that I want to delete this project]. +. In the *Permanently delete projects* window, select btn:[Yes, I confirm that I want to delete this project]. . Select btn:[Delete project]. \ No newline at end of file diff --git a/downstream/modules/eda/proc-eda-delete-rulebook-activations-with-cont-tokens.adoc b/downstream/modules/eda/proc-eda-delete-rulebook-activations-with-cont-tokens.adoc new file mode 100644 index 0000000000..8f96a3a7e8 --- /dev/null +++ b/downstream/modules/eda/proc-eda-delete-rulebook-activations-with-cont-tokens.adoc @@ -0,0 +1,17 @@ +[id="eda-delete-rulebook-activations-with-cont-tokens"] + += Deleting rulebook activations with controller tokens + +To replace the controller tokens, you must delete the rulebook activations that were associated with them. + +.Procedure + +. Log in to the {PlatformNameShort} Dashboard. +. From the top navigation panel, select {MenuADRulebookActivations}. +. Select the rulebook activations that have controller tokens. +. Select the btn:[More Actions] icon *{MoreActionsIcon}* next to the *Rulebook Activation enabled/disabled* toggle. +. Select btn:[Delete rulebook activation]. +. In the window, select btn:[Yes, I confirm that I want to delete these X rulebook activations]. +. Select btn:[Delete rulebook activations]. + + diff --git a/downstream/modules/eda/proc-eda-delete-rulebook-activations.adoc b/downstream/modules/eda/proc-eda-delete-rulebook-activations.adoc index 0a510d8df0..da43e61516 100644 --- a/downstream/modules/eda/proc-eda-delete-rulebook-activations.adoc +++ b/downstream/modules/eda/proc-eda-delete-rulebook-activations.adoc @@ -2,7 +2,9 @@ = Deleting rulebook activations +.Procedure + . Select the btn:[More Actions] icon *{MoreActionsIcon}* next to the *Rulebook Activation enabled/disabled* toggle. . Select btn:[Delete rulebook activation]. -. In the popup window, select btn:[Yes, I confirm that I want to delete these X rulebook activations]. +. In the window, select btn:[Yes, I confirm that I want to delete these X rulebook activations]. . Select btn:[Delete rulebook activations]. 
diff --git a/downstream/modules/eda/proc-eda-edit-credential.adoc b/downstream/modules/eda/proc-eda-edit-credential.adoc
index aadb801f03..12059fd745 100644
--- a/downstream/modules/eda/proc-eda-edit-credential.adoc
+++ b/downstream/modules/eda/proc-eda-edit-credential.adoc
@@ -2,6 +2,8 @@
 = Editing a credential
+You can edit existing credentials to ensure the appropriate level of access for your organization.
+
 .Procedure
 . Edit the credential by using one of these methods:
diff --git a/downstream/modules/eda/proc-eda-editing-a-project.adoc b/downstream/modules/eda/proc-eda-editing-a-project.adoc
index c09adc25f8..79edf34ba2 100644
--- a/downstream/modules/eda/proc-eda-editing-a-project.adoc
+++ b/downstream/modules/eda/proc-eda-editing-a-project.adoc
@@ -4,8 +4,7 @@
 .Procedure
-. From the *Projects* list view, select the btn:[More Actions] icon *{MoreActionsIcon}* next to the desired project.
-. Select btn:[Edit project].
+. From the *Projects* list view, select the btn:[More Actions] icon *{MoreActionsIcon}* next to the desired project. The Edit page is displayed.
 . Enter the required changes and select btn:[Save project].
-
-image::eda-edit-project.png[Edit project]
+//[J. Self]replace the following image, if possible
+//::eda-edit-project.png[Edit project]
\ No newline at end of file
diff --git a/downstream/modules/eda/proc-eda-enable-rulebook-activations.adoc b/downstream/modules/eda/proc-eda-enable-rulebook-activations.adoc
index 9657ef0aa3..7bce0494f4 100644
--- a/downstream/modules/eda/proc-eda-enable-rulebook-activations.adoc
+++ b/downstream/modules/eda/proc-eda-enable-rulebook-activations.adoc
@@ -2,6 +2,8 @@
 = Enabling and disabling rulebook activations
+.Procedure
+
 . Select the switch on the row level to enable or disable your chosen rulebook.
-. In the popup window, select btn:[Yes, I confirm that I want to enable/disable these X rulebook activations].
+. In the window, select btn:[Yes, I confirm that I want to enable/disable these X rulebook activations].
 . Select btn:[Enable/Disable rulebook activation].
diff --git a/downstream/modules/eda/proc-eda-event-streams-not-sending-events.adoc b/downstream/modules/eda/proc-eda-event-streams-not-sending-events.adoc
new file mode 100644
index 0000000000..9767e7c22c
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-event-streams-not-sending-events.adoc
@@ -0,0 +1,23 @@
+[id="eda-event-streams-not-sending-events"]
+
+= Event streams not sending events to activation
+
+If you are using event streams to send events to your rulebook activations, occasionally those events might not be successfully routed to your rulebook activation.
+
+.Procedure
+* Try the following options to resolve the issue:
+.. Ensure that each of your event streams in {EDAcontroller} is _not_ in *Test* mode. In *Test* mode, activations do not receive the events.
+.. Verify that the origin service is sending the request properly.
+.. Check that the network connection to your {Gateway} instance is stable. If you have set up event streams, this is the entry point for the event stream request from the sender.
+.. Verify that the proxy in the {Gateway} is running.
+.. Confirm that the event stream worker is up and running and able to process the request.
+.. Verify that your credential is correctly set up in the event stream.
+.. Confirm that the request complies with the authentication mechanism determined by the set credential (for example, basic authentication must include a header with the credentials, and HMAC must include the signature of the content in a header).
++
+[NOTE]
+====
+The credentials might have been changed in {EDAcontroller}, but not updated in the origin service.
+====
+
+.. Verify that the rulebook running in the activation reacts to these events. This requires that you defined the event source _and_ added actions that consume the incoming events. Otherwise, the event reaches the activation, but nothing is triggered.
+.. If you are using self-signed certificates, you might want to disable certificate validation when sending webhooks from vendors. Most vendors have an option to disable certificate validation for testing or non-production environments.
diff --git a/downstream/modules/eda/proc-eda-replace-sources-with-event-streams.adoc b/downstream/modules/eda/proc-eda-replace-sources-with-event-streams.adoc
new file mode 100644
index 0000000000..70db42435c
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-replace-sources-with-event-streams.adoc
@@ -0,0 +1,96 @@
+[id="eda-replace-sources-with-event-streams"]
+
+= Replacing sources and attaching event streams to activations
+
+When you create rulebook activations, you can use event streams to swap out source mappings in rulebook activations and simplify routing from external sources to {EDAcontroller}.
+
+There are several key points to keep in mind regarding source mapping:
+
+. An event stream can only be used once in a rulebook source swap. If you have multiple sources in the rulebook, you can only replace each source once.
+. The source mapping happens only in the current rulebook activation. You must repeat this process for any other activations using the same rulebook.
+. The source mapping is valid only if the rulebook is not modified. If the rulebook is modified during the source mapping process, the source mapping fails and must be repeated.
+. If the rulebook is modified after the source mapping has been created and a *Restart* happens, the rulebook activation fails.
+
+
+.Procedure
+
+. Log in to {PlatformNameShort}.
+. From the navigation panel, select {MenuADRulebookActivations}.
+. Click btn:[Create rulebook activation].
+. Insert the following:
++
+Name:: Insert the name.
+Description:: This field is optional.
+Organization:: Enter your organization name or select Default from the list.
+Project:: Projects are a logical collection of rulebooks. This field is optional.
++
+[NOTE]
+====
+Although this field is optional, selecting a project helps refine your list of rulebook choices.
+====
+
+Rulebook:: Rulebooks are shown according to the project selected. Select a rulebook.
++
+[NOTE]
+====
+After you have selected a rulebook, the Event streams field is enabled. You can click the gear icon to display the Event streams mapping form.
+====
+
+Event streams:: All the event streams that are available and set up to forward events to rulebook activations are displayed. If you have not created any event streams, this field remains disabled.
++
+Click the gear icon to display the Event streams mapping UI.
++
+image:eda-latest-event-streams-mapping.png[Event streams mapping UI]
++
+Complete the following fields:
++
+Rulebook source::: A rulebook can contain multiple sources across multiple rulesets. You can map the same rulebook in multiple activations to multiple event streams. While managing event streams, unnamed sources are assigned temporary names (__SOURCE {n}) for identification purposes.
++
+Select __SOURCE_1 from the list.
++
+Event stream::: Select your event stream name from the list.
++
+Click btn:[Save].
+
+Event streams can replace matching sources in a rulebook, and are server-side webhooks that enable you to connect various event sources to your rulebook activations. Source types that can be replaced with the event stream's source of type `ansible.eda.pg_listener` include `ansible.eda.webhook` and other compatible webhook source plugins. Replacing selected sources affects this activation only, and modifies the rulebook's source type, source name, and arguments. Filters, rules, conditions, and actions are all unaffected.
++
+You can select which source you want to replace with a single event stream. If there are multiple sources in your rulebook, you can choose to replace each one of them with event streams, but you are not required to replace each one. The following image displays which sources can be replaced.
++
+image:eda-event-streams-swapping-sources.png[Event streams replacement sources]
++
+The items in pink indicate the parts of a source that can be replaced: the source type, source name, and arguments. The remaining items (filters, rules, and actions) are not replaced.
++
+Credential:: Select zero or more credentials for this rulebook activation. This field is optional.
++
+[NOTE]
+====
+The credentials displayed in this field are customized based on your rulebook activation and only include the following credential types: Vault, {PlatformName}, or any custom credential types that you have created. For more information about credentials, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html-single/using_automation_decisions/index#eda-credentials[Credentials].
+====
++
+Decision environment:: A decision environment is a container image used to run Ansible rulebooks.
++
+[NOTE]
+====
+In {EDAcontroller}, you cannot customize the pull policy of the decision environment. By default, it follows the behavior of the always policy. Every time an activation is started, the system tries to pull the most recent version of the image.
+====
+Restart policy:: This is the policy that determines how an activation should restart after the container process running the source plugin ends.
+*** Policies:
+... *Always*: This restarts the rulebook activation immediately, regardless of whether it ends successfully or not, and occurs no more than 5 times.
+... *Never*: This never restarts a rulebook activation when the container process ends.
+... *On failure*: This restarts the rulebook activation after 60 seconds by default, only when the container process fails, and occurs no more than 5 times.
+Log level:: This field defines the severity and type of content in your logged events.
+*** Levels:
+... *Error*: Logs that contain error messages that are displayed in the *History* tab of an activation.
+... *Info*: Logs that contain useful information about rulebook activations, such as a success or failure, triggered action names and their related action events, and errors.
+... *Debug*: Logs that contain information that is only useful during the debug phase and might be of little value during production.
+This log level includes both error and info level data.
+Service name:: This defines a service name for Kubernetes to configure inbound connections if the activation exposes a port. This field is optional.
+Rulebook activation enabled?:: This automatically enables the rulebook activation to run.
+Variables:: The variables for the rulebook are in a JSON or YAML format.
+The content would be equivalent to the file passed through the `--vars` flag of the ansible-rulebook command.
+Options:: Check the *Skip audit events* option if you do not want to see your events in the Rule Audit.
+. Click btn:[Create rulebook activation].
++
+After you create your rulebook activation, the *Details* page is displayed.
++
+You can navigate to the Event streams page to confirm that your events have been received.
\ No newline at end of file
diff --git a/downstream/modules/eda/proc-eda-resend-webhook-data-event-streams.adoc b/downstream/modules/eda/proc-eda-resend-webhook-data-event-streams.adoc
new file mode 100644
index 0000000000..f92c7dd90d
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-resend-webhook-data-event-streams.adoc
@@ -0,0 +1,13 @@
+[id="eda-resend-webhook-data-event-streams"]
+
+= Resending webhook data from your event stream type
+
+After you have replaced your sources with the event stream you created, you can resend data from the event stream to confirm that it is attached to your rulebook activation. In the example shared earlier, the GitHub event stream was used. The following example demonstrates how to resend webhook data if you were using a GitHub event stream.
+
+.Procedure
+. Go back to the *GitHub Webhook / Manage webhook* page.
+. Click the *Recent Deliveries* tab.
+. Click the btn:[ellipsis].
+. Click btn:[Redeliver]. A *Redeliver payload?* window is displayed with a delivery message.
+. Click *Yes, redeliver this payload*.
+. Return to {PlatformNameShort} to check your rule audit.
\ No newline at end of file
diff --git a/downstream/modules/eda/proc-eda-set-up-credential-types.adoc b/downstream/modules/eda/proc-eda-set-up-credential-types.adoc
new file mode 100644
index 0000000000..d74e05858c
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-set-up-credential-types.adoc
@@ -0,0 +1,103 @@
+[id="eda-set-up-new-credential-types"]
+
+= Creating a new credential type
+
+You can create a credential type, modeled on the supported default credential types, to use with a source plugin that you select. You can make your credential type available to a team or individuals.
+
+
+.Procedure
+
+. Log in to the {PlatformNameShort} Dashboard.
+. From the navigation panel, select {MenuADCredentialType}.
+. Click btn:[Create credential type].
+. Insert the following:
++
+Name:: Insert the name.
+Description:: This field is optional.
+. In the *Input Configuration* field, specify an input schema that defines a set of ordered fields for that type.
The format can be in YAML or JSON:
++
+*YAML*
++
+[literal, options="nowrap" subs="+attributes"]
+----
+fields:
+  - type: string
+    id: username
+    label: Username
+  - type: string
+    id: password
+    label: Password
+    secret: true
+required:
+  - username
+  - password
+----
++
+View more YAML examples at the link:https://yaml.org/spec/1.2.2/[YAML page].
++
+*JSON*
++
+[literal, options="nowrap" subs="+attributes"]
+----
+{
+  "fields": [
+    {
+      "type": "string",
+      "id": "username",
+      "label": "Username"
+    },
+    {
+      "secret": true,
+      "type": "string",
+      "id": "password",
+      "label": "Password"
+    }
+  ],
+  "required": ["username", "password"]
+}
+----
++
+View more JSON examples at link:https://www.json.org/json-en.html[The JSON website].
+
+. In the *Injector Configuration* field, enter environment variables or extra variables that specify the values a credential type can inject.
+The format can be in YAML or JSON (see examples in the previous step).
++
+The following configuration in JSON format shows each field and how it is used:
++
+[literal, options="nowrap" subs="+attributes"]
+----
+
+{
+  "extra_vars": {
+    "some_extra_var": "{{ username }}:{{ password }}"
+  }
+}
+----
+
+. Click btn:[Create credential type].
++
+Your newly created credential type is displayed in the list of credential types:
+
++
+image:credential-types-new-listed.png[New credential type]
+//[JMS] Replace image with EDA version
+
+. Click the btn:[Edit credential type] image:leftpencil.png[Edit,15,15] icon to modify the credential type options.
++
+[NOTE]
+====
+On the *Edit* page, you can modify the details or delete the credential type.
+If the *Delete* option is disabled, this means that the credential type is being used by a credential, and you must remove the credential type from all of the credentials that use it before you can delete it.
+====
+
+.Verification
+
+* Verify that the newly created credential type can be selected from the *Credential Type* selection window when creating a new credential:
++
+image:credential-types-new-listed-verify.png[Verify new credential type]
+//[JMS] Replace this image with up to date one, maybe?
+
+.Additional resources
+
+For information about how to create a new credential, see xref:eda-set-up-credential[Setting up credentials].
diff --git a/downstream/modules/eda/proc-eda-set-up-credential.adoc b/downstream/modules/eda/proc-eda-set-up-credential.adoc
index 7cdb5d0a93..f183f02eb5 100644
--- a/downstream/modules/eda/proc-eda-set-up-credential.adoc
+++ b/downstream/modules/eda/proc-eda-set-up-credential.adoc
@@ -2,35 +2,34 @@
 = Setting up credentials
-Create a credential to use with a private repository (GitHub or GitLab) or a private container registry.
+You can create a credential to use with a source plugin or a private container registry that you select. You can make your credential available to a team or individuals.
-[IMPORTANT]
-====
-If you are using a GitHub or GitLab repository, use the `basic auth` method.
-Both SCM servers are officially supported.
-You can use any SCM provider that supports `basic auth`.
-====
+//[IMPORTANT]
+//====
+//If you are using a GitHub or GitLab repository, use the `basic auth` method.
+//Both SCM servers are officially supported.
+//You can use any SCM provider that supports `basic auth`.
+//====
 .Procedure
 // ddacosta: I'm not sure whether there will be an EDA specific dashboard in the gateway. Step 1 might need to change to something like "Log in to AAP".
 // Also, Credentials will be centrally defined at the platform level for 2.5.
Steps here should be verified/rewritten as appropriate and possibly relocated to Authentication docs
-. Log in to the {EDAcontroller} Dashboard.
-. From the navigation panel, select {MenuAMCredentials}.
+. Log in to the {PlatformNameShort} Dashboard.
+. From the navigation panel, select {MenuADCredentials}.
 . Click btn:[Create credential].
 . Insert the following:
 +
 Name:: Insert the name.
 Description:: This field is optional.
-Credential type:: The options available are a GitHub personal access token, a GitLab personal access token, or a container registry.
-Username:: Insert the username.
-Token:: Insert a token that allows you to authenticate to your destination.
+Organization:: Click the list to select an organization or select *Default*.
+Credential type:: Click the list to select your credential type.
 +
 [NOTE]
 ====
-If you are using a container registry, the token field can be a token or a password, depending on the registry provider.
-If you are using the {PlatformNameShort} hub registry, insert the password for that in the token field.
-====
-+
+When you select the credential type, the *Type Details* section is displayed with fields that are applicable to the credential type you chose.
+====
+
+. Complete the fields that are applicable to the credential type you selected.
 . Click btn:[Create credential].
 After saving the credential, the credentials details page is displayed.
diff --git a/downstream/modules/eda/proc-eda-set-up-new-decision-environment.adoc b/downstream/modules/eda/proc-eda-set-up-new-decision-environment.adoc
index 2de25c071e..e2a57f5433 100644
--- a/downstream/modules/eda/proc-eda-set-up-new-decision-environment.adoc
+++ b/downstream/modules/eda/proc-eda-set-up-new-decision-environment.adoc
@@ -1,29 +1,30 @@
 [id="eda-set-up-new-decision-environment"]
 = Setting up a new decision environment
-// [ddacosta] I don't think there will be an EDA specific dashboard in the gateway. This might need to be changed to reflect the changes for 2.5.
-The following steps describe how to import a decision environment into your {EDAcontroller} Dashboard.
+
+You can import a default or custom decision environment into your {EDAcontroller}.
 .Prerequisites
-* You are logged in to the {EDAcontroller} Dashboard as a Content Consumer.
 * You have set up a credential, if necessary. For more information, see the xref:eda-set-up-credential[Setting up credentials] section.
-* You have pushed a decision environment image to an image repository or you chose to use the image `de-supported` provided at link:http://registry.redhat.io/[registry.redhat.io].
+* You have pushed a decision environment image to an image repository, or you chose to use the `de-minimal` base image or the `de-supported` image provided at link:http://registry.redhat.io/[registry.redhat.io].
 .Procedure
-// ddacosta I'm not sure whether there will be an EDA specific dashboard in the gateway. Step 1 might need to change to something like "Log in to AAP".
-. Navigate to the {EDAcontroller} Dashboard.
-. From the navigation panel, select {MenuADDecisionEnvironments}.
+
+. Log in to {PlatformNameShort}.
+. Navigate to {MenuADDecisionEnvironments}.
+. Click btn:[Create decision environment].
 . Insert the following:
 +
 Name:: Insert the name.
 Description:: This field is optional.
+Organization:: Select an organization to associate with the decision environment.
 Image:: This is the full image location, including the container registry, image name, and version tag.
-Credential:: This field is optional. This is the token needed to utilize the decision environment image.
+Credential:: This field is optional. This is the credential needed to use the decision environment image.
 . Select btn:[Create decision environment].
-Your decision environment is now created and can be managed on the *Decision Environments* screen.
+Your decision environment is now created and can be managed on the *Decision Environments* page.
 After saving the new decision environment, the decision environment's details page is displayed. From there or the *Decision Environments* list view, you can edit or delete it.
diff --git a/downstream/modules/eda/proc-eda-set-up-new-project.adoc b/downstream/modules/eda/proc-eda-set-up-new-project.adoc
index a933cfec2c..84ccb95c6b 100644
--- a/downstream/modules/eda/proc-eda-set-up-new-project.adoc
+++ b/downstream/modules/eda/proc-eda-set-up-new-project.adoc
@@ -2,32 +2,46 @@
 = Setting up a new project
+You can set up projects to manage and store your rulebooks in {EDAcontroller}.
+
 .Prerequisites
 // [ddacosta] I'm not sure whether there will be an EDA specific dashboard in the gateway. Step 1 might need to change to something like "Log in to AAP".
-* You are logged in to the {EDAcontroller} Dashboard as a Content Consumer.
+* You are logged in to the {PlatformNameShort} Dashboard as a Content Consumer.
 * You have set up a credential, if necessary. For more information, see the xref:eda-set-up-credential[Setting up credentials] section.
 * You have an existing repository containing rulebooks that are integrated with playbooks contained in a repository to be used by {ControllerName}.
 .Procedure
 // [ddacosta] I'm not sure whether there will be an EDA specific dashboard in the gateway. Step 1 might need to change to something like "Log in to AAP".
-. Log in to the {EDAcontroller} Dashboard.
-. From the navigation panel, select *{MenuADProjects}*.
+. Log in to the {PlatformNameShort} Dashboard.
+. Navigate to *{MenuADProjects}*.
+. Click btn:[Create project].
 . Insert the following:
 +
 Name:: Enter project name.
 Description:: This field is optional.
-SCM type:: Git is the only SCM type available for use.
-SCM URL:: HTTP[S] protocol address of a repository, such as GitHub or GitLab.
+Source control type:: Git is the only source control type available for use. This field is optional.
+Source control URL:: Enter the Git, SSH, or HTTP[S] protocol address of a repository, such as GitHub or GitLab. You cannot edit this field after you create the project.
++
+[NOTE]
+====
+This field accepts an SSH private key or private key phrase. To enable the use of these private keys, your project URL must begin with `git@`.
+====
+Proxy:: This is used to access HTTP or HTTPS servers. This field is optional.
+Source control branch/tag/commit:: This is the branch to check out. In addition to branches, you can input tags, commit hashes, and arbitrary refs. Some commit hashes and refs might not be available unless you also provide a custom refspec. This field is optional.
+Source control refspec:: A refspec to fetch (passed to the Ansible git module). This parameter allows access to references that are not otherwise available through the branch field. This field is optional.
+For more information, see link:https://docs.ansible.com/ansible/latest/collections/ansible/builtin/git_module.html#examples[Examples].
+Source control credential:: This is the credential needed to use the source control URL, if the repository requires authentication. This field is optional.
+Content signature validation credential:: Enable content signing to verify that the content has remained secure when a project is synced. If the content has been tampered with, the job will not run. This field is optional.
+Options:: The Verify SSL option is enabled by default. Enabling this option verifies the SSL certificate with HTTPS when the project is imported.
+
[NOTE]
====
-You cannot edit the SCM URL after you create the project.
+You can disable this option if you have a local repository that uses self-signed certificates.
====
-Credential:: This field is optional. This is the token needed to utilize the SCM URL.

. Select btn:[Create project].

-Your project is now created and can be managed in the *Projects* screen.
+Your project is now created and can be managed on the *Projects* page.

After saving the new project, the project's details page is displayed.
From there or the *Projects* list view, you can edit or delete it.
diff --git a/downstream/modules/eda/proc-eda-set-up-rhaap-credential.adoc b/downstream/modules/eda/proc-eda-set-up-rhaap-credential.adoc
new file mode 100644
index 0000000000..6d349bbc42
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-set-up-rhaap-credential.adoc
@@ -0,0 +1,40 @@
+[id="eda-set-up-rhaap-credential"]
+
+= Setting up a {PlatformName} credential
+
+You can create a {PlatformName} credential type to run your rulebook activations.
+
+.Prerequisites
+
+* You have created a user.
+* You have obtained the URL and the credentials to access {ControllerName}.
+
+
+.Procedure
+
+. Log in to the {PlatformNameShort} Dashboard.
+. From the navigation panel, select {MenuADCredentials}.
+. Click btn:[Create credential].
+. Insert the following:
++
+Name:: Insert the name.
+Description:: This field is optional.
+Organization:: Click the list to select an organization or select *Default*.
+Credential type:: Click the list and select *{PlatformName}*.
++
+[NOTE]
+====
+When you select the credential type, the *Type Details* section is displayed with fields that are applicable for the credential type you chose.
+====
+. In the required {PlatformName} field, enter your automation controller URL.
++
+[NOTE]
+====
+For {EDAcontroller} {PlatformVers} with {ControllerName} 2.4, use the following example: \https://<controller hostname>
+
+For {PlatformNameShort} {PlatformVers}, use the following example: \https://<platform hostname>/api/controller/
+====
+. Enter a valid *Username* and *Password*, or *OAuth Token*.
+. Click btn:[Create credential].
+
+After you create this credential, you can use it to configure your rulebook activations.
\ No newline at end of file
diff --git a/downstream/modules/eda/proc-eda-set-up-rulebook-activation.adoc b/downstream/modules/eda/proc-eda-set-up-rulebook-activation.adoc
index a0628bd76f..8326dd5a8f 100644
--- a/downstream/modules/eda/proc-eda-set-up-rulebook-activation.adoc
+++ b/downstream/modules/eda/proc-eda-set-up-rulebook-activation.adoc
@@ -4,21 +4,30 @@

.Prerequisites
// [ddacosta] I'm not sure whether there will be an EDA specific dashboard in the gateway. Step 1 might need to change to something like "Log in to AAP".
-* You are logged in to the {EDAcontroller} Dashboard as a Content Consumer.
+* You are logged in to the {PlatformNameShort} Dashboard as a Content Consumer.
* You have set up a project.
* You have set up a decision environment.
-* You have set up an {ControllerName} token.

.Procedure
// [ddacosta] I'm not sure whether there will be an EDA specific dashboard in the gateway. Step 1 might need to change to something like "Log in to AAP".
-. Navigate to the {EDAcontroller} Dashboard.
-. From the navigation panel, select {MenuADRulebookActivations}.
+. Log in to {PlatformNameShort}.
+. Navigate to {MenuADRulebookActivations}.
+. Click btn:[Create rulebook activation].
. Insert the following:
+
Name:: Insert the name.
Description:: This field is optional.
-Project:: Projects are a logical collection of rulebooks.
-Rulebook:: Rulebooks are shown according to the project selected.
+Organization:: Enter your organization name or select *Default* from the list.
+Project:: Projects are a logical collection of rulebooks. This field is optional.
+Rulebook:: Rulebooks are displayed according to the project selected.
+Credential:: Select zero or more credentials for this rulebook activation. This field is optional.
++
+[NOTE]
+====
+* The credentials that display in this field are customized based on your rulebook activation and only include the following credential types: Vault, {PlatformName}, or any custom credential types that you have created. For more information about credentials, see xref:eda-credentials[Credentials].
+* If you plan to use a {PlatformName} credential, you can _only_ select one {PlatformName} credential type for a rulebook activation.
+====
+
Decision environment:: A decision environment is a container image used to run Ansible rulebooks.
+
[NOTE]
@@ -27,18 +36,33 @@ In {EDAcontroller}, you cannot customize the pull policy of the decision environ
By default, it follows the behavior of the *always* policy.
Every time an activation is started, the system tries to pull the most recent version of the image.
====
-Restart policy:: This is a policy to decide when to restart a rulebook.
+Restart policy:: This is the policy that determines how an activation should restart after the container process running the source plugin ends.
*** Policies:
-... Always: Restarts when a rulebook finishes
-... Never: Never restarts a rulebook when it finishes
-... On failure: Only restarts when it fails
+... *Always*: This restarts the rulebook activation immediately, regardless of whether it ends successfully or not, and occurs no more than 5 times.
+... *Never*: This never restarts a rulebook activation when the container process ends.
+... *On failure*: This restarts the rulebook activation after 60 seconds by default, only when the container process fails, and occurs no more than 5 times.
+Log level:: This field defines the severity and type of content in your logged events.
+*** Levels:
+... *Error*: Logs that contain error messages that are displayed in the *History* tab of an activation.
+... *Info*: Logs that contain useful information about rulebook activations, such as a success or failure, triggered action names and their related action events, and errors.
+... *Debug*: Logs that contain information that is only useful during the debug phase and might be of little value during production.
+This log level includes both error and info level data.
+Service name:: This defines a service name for Kubernetes to configure inbound connections if the activation exposes a port. This field is optional.
Rulebook activation enabled?:: This automatically enables the rulebook activation to run.
-Variables:: The variables for the rulebook are in a JSON/YAML format.
+Variables:: The variables for the rulebook are in a JSON or YAML format.
The content would be equivalent to the file passed through the `--vars` flag of the `ansible-rulebook` command.
+Options:: Check the *Skip audit events* option if you do not want to see your events in the Rule Audit.
. Click btn:[Create rulebook activation].

-Your rulebook activation is now created and can be managed in the *Rulebook Activations* screen.
+Your rulebook activation is now created and can be managed on the *Rulebook Activations* page.

-After saving the new rulebook activation, the rulebook activation's details page is displayed.
-From there or the *Rulebook Activations* list view you can edit or delete it.
+After saving the new rulebook activation, the rulebook activation's details page is displayed, with either a *Pending*, *Running*, or *Failed* status.
+From there or the *Rulebook Activations* list view, you can restart or delete it.
+
+[NOTE]
+====
+Occasionally, when a source plugin shuts down, it causes a rulebook to exit gracefully after a certain amount of time.
+When a rulebook activation shuts down, any tasks that are waiting to be performed will be canceled, and an info level message is sent to the activation log.
+For more information, see link:https://ansible.readthedocs.io/projects/rulebook/en/stable/rulebooks.html#[Rulebooks].
+====
diff --git a/downstream/modules/eda/proc-eda-verify-event-streams-work.adoc b/downstream/modules/eda/proc-eda-verify-event-streams-work.adoc
new file mode 100644
index 0000000000..2bbdd5181f
--- /dev/null
+++ b/downstream/modules/eda/proc-eda-verify-event-streams-work.adoc
@@ -0,0 +1,24 @@
+[id="eda-verify-event-streams"]
+
+= Verifying your event streams work
+
+Verify that you can use your event stream to connect to a remote system and receive data.
+
+. Log in to {PlatformNameShort}.
+. From the navigation panel, select {MenuADEventStreams}.
+. Select the event stream that you created to validate connectivity and ensure that the event stream sends data to the rulebook activation.
+. Verify that the events were received. You can see in the *Events received* field that the event was received. You can also see the header for the event stream that contains details about the event.
++
+image:eda-verify-event-streams.png[Verify event streams work]
++
+If you scroll down in the UI, you can also see the body of the payload with more information about the webhook.
++
+image:eda-payload-body-event-streams.png[Payload body]
++
+
+The *Header* and *Body* sections for the event stream are displayed on the Details page. They differ based on the vendor who is sending the event. The header and body can be used to check the attributes in the event payload, which helps you write conditions in your rulebook.
+
+. Toggle the *Forward events to rulebook activation* option to enable you to push your events to a rulebook activation.
+This moves the event stream to production mode and makes it easy to attach to rulebook activations.
++
+When this option is toggled off, your ability to forward events to a rulebook activation is disabled and the *This event stream is disabled* message is displayed.
\ No newline at end of file
diff --git a/downstream/modules/eda/proc-eda-view-activation-output.adoc b/downstream/modules/eda/proc-eda-view-activation-output.adoc
index c7e0e54460..8fd7436e4b 100644
--- a/downstream/modules/eda/proc-eda-view-activation-output.adoc
+++ b/downstream/modules/eda/proc-eda-view-activation-output.adoc
@@ -7,8 +7,9 @@ You can view the output of the activations in the *History* tab.

.Procedure

. Select the *History* tab to access the list of all the activation instances.
An activation instance represents a single execution of the activation.
-. Then select the activation instance in question, this will show you the *Output* produced by that specific execution.
+. Then select the activation instance in question to see the *Output* produced by that specific execution.

+//Replace this screenshot with current view
image::eda-rulebook-activation-history.png[Rulebook activation history]

To view events that came in and triggered an action, you can use the xref:eda-rule-audit[Rule Audit] section in the {EDAcontroller} Dashboard.
diff --git a/downstream/modules/eda/proc-eda-view-rule-audit-actions.adoc b/downstream/modules/eda/proc-eda-view-rule-audit-actions.adoc
index e0f52c1174..9e3487a6fa 100644
--- a/downstream/modules/eda/proc-eda-view-rule-audit-actions.adoc
+++ b/downstream/modules/eda/proc-eda-view-rule-audit-actions.adoc
@@ -5,7 +5,7 @@

.Procedure

. From the navigation panel select *{MenuADRuleAudit}*.
-. Select the desired rule, this brings you to the *Actions* tab.
+. Select the desired rule, then select the *Actions* tab.
From here you can view executed actions that were taken.

-Some actions are linked out to {ControllerName} where you can view the output.
+Some actions are linked out to {MenuTopAE} where you can view the output.
diff --git a/downstream/modules/eda/proc-modifying-activations-after-install.adoc b/downstream/modules/eda/proc-modifying-activations-after-install.adoc
new file mode 100644
index 0000000000..48d2c536e4
--- /dev/null
+++ b/downstream/modules/eda/proc-modifying-activations-after-install.adoc
@@ -0,0 +1,14 @@
+[id="modifying-activations-after-install"]
+
+= Modifying the number of simultaneous rulebook activations after {EDAcontroller} installation
+
+[role="_abstract"]
+By default, {EDAcontroller} allows 12 rulebook activations per node. For example, with two worker or hybrid nodes, this results in a limit of 24 activations in total that can run simultaneously.
+You can modify this default value after installation by using the following procedure:
+
+.Procedure
+. Navigate to the environment file at `/etc/ansible-automation-platform/eda/settings.yaml`.
+. Set the maximum number of running activations that you need.
+For example, `MAX_RUNNING_ACTIVATIONS = 16`.
+. Use the following command to restart {EDAName} services: `automation-eda-controller-service restart`
+
diff --git a/downstream/modules/eda/proc-modifying-activations-during-install.adoc b/downstream/modules/eda/proc-modifying-activations-during-install.adoc
new file mode 100644
index 0000000000..80bbd8b82c
--- /dev/null
+++ b/downstream/modules/eda/proc-modifying-activations-during-install.adoc
@@ -0,0 +1,14 @@
+[id="modifying-activations-during-install"]
+
+= Modifying the number of simultaneous rulebook activations during {EDAcontroller} installation
+
+[role="_abstract"]
+By default, {EDAcontroller} allows 12 rulebook activations per node. For example, with two worker or hybrid nodes, this results in a limit of 24 activations in total that can run simultaneously. You can modify this default value during installation by using the following procedure:
+
+.Procedure
+Provide a variable to the VM installer:
+
+. Navigate to the setup inventory file.
+. Add `automationedacontroller_max_running_activations` in the [all:vars] section.
+For example, `automationedacontroller_max_running_activations=16`.
+. Run the setup.
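For quick reference, here is a sketch of the two changes described in the preceding procedures. The value `16` is only an example; set whatever limit your workload needs.

----
# After installation, in /etc/ansible-automation-platform/eda/settings.yaml:
MAX_RUNNING_ACTIVATIONS = 16

# During installation, in the [all:vars] section of the setup inventory:
automationedacontroller_max_running_activations=16
----

After editing `settings.yaml`, restart the services with `automation-eda-controller-service restart` as described above.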
\ No newline at end of file diff --git a/downstream/modules/eda/proc-modifying-memory-after-install.adoc b/downstream/modules/eda/proc-modifying-memory-after-install.adoc new file mode 100644 index 0000000000..929fad12b9 --- /dev/null +++ b/downstream/modules/eda/proc-modifying-memory-after-install.adoc @@ -0,0 +1,13 @@ +[id="modifying-memory-after-install"] + += Modifying the default memory limit for each rulebook activation after installation + +[role="_abstract"] +By default, each rulebook activation container has a 200MB memory limit. +You can modify this default value after installation by using the following procedure: + +.Procedure +. Navigate to the environment file at `/etc/ansible-automation-platform/eda/settings.yaml`. +. Modify the default container memory limit. +For example, `PODMAN_MEM_LIMIT = '300m'`. +. Restart the {EDAcontroller} services using `automation-eda-controller-service restart`. diff --git a/downstream/modules/eda/proc-modifying-memory-during-install.adoc b/downstream/modules/eda/proc-modifying-memory-during-install.adoc new file mode 100644 index 0000000000..cfd7d29008 --- /dev/null +++ b/downstream/modules/eda/proc-modifying-memory-during-install.adoc @@ -0,0 +1,13 @@ +[id="modifying-memory-during-install"] + += Modifying the default memory limit for each rulebook activation during installation + +[role="_abstract"] +By default, each rulebook activation container has a 200MB memory limit. +You can modify this default value during installation by using the following procedure: + +.Procedure +. Navigate to the setup inventory file. +. Add `automationedacontroller_podman_mem_limit` in the [all:vars] section. +For example, `automationedacontroller_podman_mem_limit='400m'`. +. Run the setup. diff --git a/downstream/modules/eda/ref-deploy-eda-controller-with-aap-operator-on-ocp.adoc b/downstream/modules/eda/ref-deploy-eda-controller-with-aap-operator-on-ocp.adoc index c9d9d55181..7f0e0fb22d 100644 --- a/downstream/modules/eda/ref-deploy-eda-controller-with-aap-operator-on-ocp.adoc +++ b/downstream/modules/eda/ref-deploy-eda-controller-with-aap-operator-on-ocp.adoc @@ -1,7 +1,7 @@ [id="deploying-eda-controller-with-aap-operator-on-ocp"] -= Deploying {EDAcontroller} with {OperatorPlatform} on {OCPShort} += Deploying {EDAcontroller} with {OperatorPlatformNameShort} on {OCPShort} -{EDAName} is not limited to {PlatformNameShort} on VMs. You can also access this feature on {OperatorPlatform} on {OCPShort}. To deploy {EDAName} with {OperatorPlatform}, follow the instructions in link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/deploying_the_red_hat_ansible_automation_platform_operator_on_openshift_container_platform/index#deploy-eda-controller-on-aap-operator-ocp[Deploying Event-Driven Ansible controller with Ansible Automation Platform Operator on OpenShift Container Platform]. +{EDAName} is not limited to {PlatformNameShort} on VMs. You can also access this feature on {OperatorPlatformNameShort} on {OCPShort}. To deploy {EDAName} with {OperatorPlatformNameShort}, follow the instructions in link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/deploying_the_red_hat_ansible_automation_platform_operator_on_openshift_container_platform/index#deploy-eda-controller-on-aap-operator-ocp[Deploying Event-Driven Ansible controller with Ansible Automation Platform Operator on OpenShift Container Platform]. After successful deployment, you can connect to event sources and resolve issues more efficiently. 
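The memory-limit procedures above follow the same pattern as the activation-count settings. As a sketch, using the example values from the text:

----
# After installation, in /etc/ansible-automation-platform/eda/settings.yaml:
PODMAN_MEM_LIMIT = '300m'

# During installation, in the [all:vars] section of the setup inventory:
automationedacontroller_podman_mem_limit='400m'
----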
diff --git a/downstream/modules/eda/ref-eda-controller-install-builder.adoc b/downstream/modules/eda/ref-eda-controller-install-builder.adoc
new file mode 100644
index 0000000000..b9ae42f0a5
--- /dev/null
+++ b/downstream/modules/eda/ref-eda-controller-install-builder.adoc
@@ -0,0 +1,11 @@
+[id="eda-controller-install-builder"]
+
+= Installing ansible-builder
+
+To build images, you must have Podman or Docker installed, along with the `ansible-builder` Python package.
+
+The `--container-runtime` option must correspond to the Podman or Docker executable you intend to use.
+
+When you build a decision environment image, it must support the architecture that {PlatformNameShort} is deployed with.
+
+For more information, see link:https://ansible.readthedocs.io/projects/builder/en/latest/#quickstart-for-ansible-builder[Quickstart for Ansible Builder] or link:{LinkBuilder}.
diff --git a/downstream/modules/eda/ref-eda-logging-samples.adoc b/downstream/modules/eda/ref-eda-logging-samples.adoc
new file mode 100644
index 0000000000..c91a1ddecf
--- /dev/null
+++ b/downstream/modules/eda/ref-eda-logging-samples.adoc
@@ -0,0 +1,75 @@
+[id="eda-logging-samples"]
+
+= Logging samples
+
+When the following APIs are called for each operation, you see the following audit logs:
+
+.Rulebook activation
+
+----
+1. Create
+ 1. 2024-08-15 14:13:20,384 aap_eda.api.views.activation INFO Action: Create / ResourceType: RulebookActivation / ResourceName: quick_start_project / ResourceID: 53 / Organization: Default
+2. Read
+ 1. 2024-08-15 14:21:26,844 aap_eda.api.views.activation INFO Action: Read / ResourceType: RulebookActivation / ResourceName: quick_start_activation / ResourceID: 1 / Organization: Default
+3. Disable
+ 1. 2024-08-15 14:23:57,798 aap_eda.api.views.activation INFO Action: Disable / ResourceType: RulebookActivation / ResourceName: quick_start_activation / ResourceID: 1 / Organization: Default
+4. Enable
+ 1. 2024-08-15 14:24:16,472 aap_eda.api.views.activation INFO Action: Enable / ResourceType: RulebookActivation / ResourceName: quick_start_activation / ResourceID: 1 / Organization: Default
+5. Delete
+ 1. 2024-08-15 14:24:53,847 aap_eda.api.views.activation INFO Action: Delete / ResourceType: RulebookActivation / ResourceName: quick_start_activation / ResourceID: 1 / Organization: Default
+6. Restart
+ 2024-08-15 14:24:34,169 aap_eda.api.views.activation INFO Action: Restart / ResourceType: RulebookActivation / ResourceName: quick_start_activation / ResourceID: 1 / Organization: Default
+----
+
+.EventStream Logs
+----
+1. Create
+ 1. 2024-08-15 13:46:26,903 aap_eda.api.views.webhook INFO Action: Create / ResourceType: EventStream / ResourceName: ZackTest / ResourceID: 1 / Organization: Default
+2. Update
+ 1. 2024-08-15 13:56:17,440 aap_eda.api.views.webhook INFO Action: Update / ResourceType: EventStream / ResourceName: ZackTest / ResourceID: 1 / Organization: Default
+3. Read
+ 1. 2024-08-15 13:56:56,271 aap_eda.api.views.webhook INFO Action: Read / ResourceType: EventStream / ResourceName: ZackTest / ResourceID: 1 / Organization: Default
+4. List
+ 1. 2024-08-15 13:56:17,492 aap_eda.api.views.webhook INFO Action: List / ResourceType: EventStream / ResourceName: * / ResourceID: * / Organization: *
+5. Delete
+ 1. 2024-08-15 13:57:13,124 aap_eda.api.views.webhook INFO Action: Delete / ResourceType: EventStream / ResourceName: ZackTest / ResourceID: None / Organization: Default
+----
+
+.Decision Environment
+----
+1. Create
+ 1.
2024-08-15 14:10:53,311 aap_eda.api.views.decision_environment INFO Action: Create / ResourceType: DecisionEnvironment / ResourceName: quick_start_de / ResourceID: 86 / Organization: Default +2. Read + 1. 2024-08-15 14:10:53,349 aap_eda.api.views.decision_environment INFO Action: Read / ResourceType: DecisionEnvironment / ResourceName: quick_start_de / ResourceID: 86 / Organization: Default +3. Update + 2024-08-15 14:11:20,970 aap_eda.api.views.decision_environment INFO Action: Update / ResourceType: DecisionEnvironment / ResourceName: quick_start_de / ResourceID: 86 / Organization: Default +4. Delete +2024-08-15 14:11:42,369 aap_eda.api.views.decision_environment INFO Action: Delete / ResourceType: DecisionEnvironment / ResourceName: quick_start_de / ResourceID: None / Organization: Default +---- + +.Project +---- +1. Create + 1. 2024-08-15 14:05:26,874 aap_eda.api.views.project INFO Action: Create / ResourceType: Project / ResourceName: quick_start_project / ResourceID: 86 / Organization: Default +2. Read + 1. 2024-08-15 14:05:26,913 aap_eda.api.views.project INFO Action: Read / ResourceType: Project / ResourceName: quick_start_project / ResourceID: 86 / Organization: Default +3. Update + 1. 2024-08-15 14:06:08,255 aap_eda.api.views.project INFO Action: Update / ResourceType: Project / ResourceName: quick_start_project / ResourceID: 86 / Organization: Default +4. Sync + 1. 2024-08-15 14:06:30,580 aap_eda.api.views.project INFO Action: Sync / ResourceType: Project / ResourceName: quick_start_project / ResourceID: 86 / Organization: Default +5. Delete + 1. 2024-08-15 14:06:49,481 aap_eda.api.views.project INFO Action: Delete / ResourceType: Project / ResourceName: quick_start_project / ResourceID: 86 / Organization: Default +---- + +.Activation Start/Stop +---- +1. Start + 1. 2024-08-15 14:21:29,076 aap_eda.services.activation.activation_manager INFO Requested to start activation 1, starting. + 2024-08-15 14:21:29,093 aap_eda.services.activation.activation_manager INFO Creating a new activation instance for activation: 1 + 2024-08-15 14:21:29,104 aap_eda.services.activation.activation_manager INFO Starting container for activation instance: 1 +2. Stop + 1. eda-activation-worker-1 | 2024-08-15 14:40:52,547 aap_eda.services.activation.activation_manager INFO Stop operation requested for activation id: 2 Stopping activation. + eda-activation-worker-1 | 2024-08-15 14:40:52,550 aap_eda.services.activation.activation_manager INFO Activation 2 is already stopped. + eda-activation-worker-1 | 2024-08-15 14:40:52,550 aap_eda.services.activation.activation_manager INFO Activation manager activation id: 2 Activation restart scheduled for 1 second. + eda-activation-worker-1 | 2024-08-15 14:40:52,562 rq.worker INFO activation: Job OK (activation-2) +---- diff --git a/downstream/modules/eda/ref-performance-troubleshooting.adoc b/downstream/modules/eda/ref-performance-troubleshooting.adoc new file mode 100644 index 0000000000..275dff5418 --- /dev/null +++ b/downstream/modules/eda/ref-performance-troubleshooting.adoc @@ -0,0 +1,20 @@ +[id="performance-troubleshooting"] + += Performance Troubleshooting for {EDAcontroller} + +[role="_abstract"] +Based on the default parameters within {EDAcontroller}, you might encounter scenarios that pose challenges to completing your workload. +The following section provides descriptions of these scenarios and troubleshooting guidance. + +* My activation status displays as “running”, but it is not processing the events. 
+** Ensure that you are using the correct event source in the rulebook activation.
+If the event you are expecting is coming from a source other than what is in the rulebook, {EDAcontroller} will not process the event.
+
+* My activation status displays as “running”, and {EDAcontroller} is also receiving the events, but no actions are occurring.
+** Ensure that you have set the correct conditions for matching the event and taking actions in the rulebook activation.
+
+* My activation keeps restarting in an infinite loop.
+** By default, the restart policy for rulebook activations is set to *On Failure*. Change the restart policy using the following procedure:
+. Navigate to {MenuADRulebookActivations}.
+. Select the *Restart Policy* list to display the options.
+. Select the appropriate value: *On Failure*, *Always*, or *Never*.
diff --git a/downstream/modules/hub/con-approval-pipeline.adoc b/downstream/modules/hub/con-approval-pipeline.adoc
index efd5a9b3a0..257ef10e19 100644
--- a/downstream/modules/hub/con-approval-pipeline.adoc
+++ b/downstream/modules/hub/con-approval-pipeline.adoc
@@ -6,13 +6,13 @@

= Approval pipeline for custom repositories in {HubName}

-In {HubName} you can approve collections into any repository marked with the `pipeline=approved` label. By default, {HubName} ships with one repository for approved content, but you have the option to add more from the repository creation screen. You cannot directly publish into a repository marked with the `pipeline=approved` label. A collection must first go through a staging repository and be approved before being published into a 'pipleline=approved' repository.
+In {HubName} you can approve collections into any repository marked with the `pipeline=approved` label. By default, {HubName} includes one repository for approved content, but you have the option to add more from the repository creation screen. You cannot directly publish into a repository marked with the `pipeline=approved` label. A collection must first go through a staging repository and be approved before being published into a `pipeline=approved` repository.

Auto approval::
When auto approve is enabled, any collection you upload to a staging repository is automatically promoted to all of the repositories marked as `pipeline=approved`.

Approval required::
-When auto approve is disabled, the administrator can view the approval dashboard and see collections that have been uploaded into any of the staging repositories. Clicking btn:[Approve] displays a list of approved repositories. From this list, the administrator can select one or more repositories to which the content should be promoted.
+When auto approve is disabled, the administrator can view the approval dashboard and see collections that have been uploaded into any of the staging repositories. Sorting by *Approved* displays a list of approved repositories. From this list, the administrator can select one or more repositories to which the content should be promoted.
+
If only one approved repository exists, the collection is automatically promoted into it and the administrator is not prompted to select a repository.
diff --git a/downstream/modules/hub/con-approval.adoc b/downstream/modules/hub/con-approval.adoc
index 43b210b120..500654948d 100644
--- a/downstream/modules/hub/con-approval.adoc
+++ b/downstream/modules/hub/con-approval.adoc
@@ -2,8 +2,8 @@

= About Approval

-You can manage uploaded collections in {HubName} by using the *Approval* feature located in the navigation panel.
+You can manage uploaded collections in {HubName} by using the *Collection Approvals* feature located in the navigation panel.

Approval Dashboard:: By default, the *Approval* dashboard lists all collections with *Needs Review* status. You can check these for inclusion in your *Published* repository.
Viewing collection details:: You can view more information about the collection by clicking the *Version* number.
-Filtering collections:: Filter collections by *Namespace*, *Collection Name* or *Repository*, to locate content and update its status.
+Filtering collections:: Filter collections by *Namespace*, *Collection*, or *Repository* to locate content and update its status.
diff --git a/downstream/modules/hub/con-container-registry.adoc b/downstream/modules/hub/con-container-registry.adoc
index f412919450..58380c01ca 100644
--- a/downstream/modules/hub/con-container-registry.adoc
+++ b/downstream/modules/hub/con-container-registry.adoc
@@ -6,13 +6,13 @@

[role="_abstract"]

-The {HubName} container registry is used for storing and managing container images.
-When you have built or sourced a container image, you can push that container image to the registry portion of {PrivateHubName} to create a container repository.
+The {HubName} remote registry is used for storing and managing {ExecEnvShort}s.
+When you have built or sourced an {ExecEnvShort}, you can push that {ExecEnvShort} to the registry portion of {PrivateHubName} to create a container repository.

[role="_additional-resources"]
.Next steps

-* Push a container image to the {HubName} container registry.
+* Push an {ExecEnvShort} to the {HubName} remote registry.
* Create a group with access to the container repository in the registry.
* Add the new group to the container repository.
* Add a README to the container repository to provide users with information and relevant links.
diff --git a/downstream/modules/hub/con-offline-token-active.adoc b/downstream/modules/hub/con-offline-token-active.adoc
index c52d398eb5..dd406a7b75 100644
--- a/downstream/modules/hub/con-offline-token-active.adoc
+++ b/downstream/modules/hub/con-offline-token-active.adoc
@@ -1,7 +1,7 @@

-[id="con-offline-token-active"]
+[id="con-offline-token-active_{context}"]

-= Keeping your offline token active
+== Keeping your offline token active

Offline tokens expire after 30 days of inactivity. You can keep your offline token from expiring by periodically refreshing your offline token.

Keeping an offline token active is useful when an application performs an action
@@ -9,7 +9,7 @@

[NOTE]
====
-If your offline token expires, you must request a new one.
+If your offline token expires, you must xref:proc-create-api-token_cloud-sync[obtain a new one].
====

.Procedure
diff --git a/downstream/modules/hub/con-repo-rbac.adoc b/downstream/modules/hub/con-repo-rbac.adoc
index d325971e3e..763d3811b3 100644
--- a/downstream/modules/hub/con-repo-rbac.adoc
+++ b/downstream/modules/hub/con-repo-rbac.adoc
@@ -6,4 +6,4 @@

= Role based access control to restrict access to custom repositories

-Use Role Based Access Control (RBAC) to restrict user access to custom repositories by defining access permissions based on user roles. By default, users can view all public repositories in their {HubName}, but they cannot modify a repository unless their role allows them access to do so. The same logic applies to other operations on the repository. For example, you can remove a user's ability to download content from a custom repository by changing their role permissions.
See link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_hub/assembly-user-access[Configuring user access for your {PrivateHubName}] for information about managing user access in {HubName}. +Use Role Based Access Control (RBAC) to restrict user access to custom repositories by defining access permissions based on user roles. By default, users can view all public repositories in their {HubName}, but they cannot modify a repository unless their role allows them access to do so. The same logic applies to other operations on the repository. For example, you can remove a user's ability to download content from a custom repository by changing their role permissions. See link:{LinkCentralAuth} for information about managing user access to {HubName}. diff --git a/downstream/modules/hub/con-rh-certified-synclist.adoc b/downstream/modules/hub/con-rh-certified-synclist.adoc index efa55f8cce..f85e7210e6 100644 --- a/downstream/modules/hub/con-rh-certified-synclist.adoc +++ b/downstream/modules/hub/con-rh-certified-synclist.adoc @@ -2,10 +2,10 @@ = Explanation of Red Hat {CertifiedName} synclists -A synclist is a curated group of Red Hat Certified Collections that is assembled by your organization administrator. +A synclist is a curated group of Red Hat Certified Collections assembled by your organization administrator. It synchronizes with your local {HubNameMain}. Use synclists to manage only the content that you want and exclude unnecessary collections. Design and manage your synclist from the content available as part of Red Hat content on {Console} -Each synclist has its own unique repository URL that you can use to designate as a remote source for content in {HubName}. +Each synclist has its own unique repository URL that you can designate as a remote source for content in {HubName}. You securely access each synclist by using an API token. \ No newline at end of file diff --git a/downstream/modules/hub/con-token-management-hub.adoc b/downstream/modules/hub/con-token-management-hub.adoc new file mode 100644 index 0000000000..8c63acb7e9 --- /dev/null +++ b/downstream/modules/hub/con-token-management-hub.adoc @@ -0,0 +1,19 @@ +:_newdoc-version: 2.18.3 +:_template-generated: 2024-11-19 + +:_mod-docs-content-type: CONCEPT + +[id="token-management-hub_{context}"] += Token management in {HubName} + +Before you can interact with {HubName} by uploading or downloading collections, you must create an API token. The {HubName} API token authenticates your `ansible-galaxy` client to the Red Hat {HubName} server. + +Your method for creating the API token differs according to the type of {HubName} that you are using: + +* {HubNameStart} uses offline token management. See xref:proc-create-api-token_cloud-sync[Creating the offline token in {HubName}]. + +* {PrivateHubNameStart} uses API token management. See xref:proc-create-api-token-pah_cloud-sync[Creating the API token in {PrivateHubName}]. + +* If you are using Keycloak to authenticate your {PrivateHubName}, follow the procedure for xref:proc-create-api-token_cloud-sync[Creating the offline token in {HubName}]. + + diff --git a/downstream/modules/hub/proc-add-container-readme.adoc b/downstream/modules/hub/proc-add-container-readme.adoc index a70afd8298..ff4fdcc3f0 100644 --- a/downstream/modules/hub/proc-add-container-readme.adoc +++ b/downstream/modules/hub/proc-add-container-readme.adoc @@ -26,10 +26,10 @@ By default, the README is empty. * You have permissions to change containers. 
.Procedure
-//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ...
-. Log in to {HubName}.
+
+. Log in to {PlatformNameShort}.
. From the navigation panel, select {MenuACExecEnvironments}.
-. Select your container repository.
+. Select your {ExecEnvShort}.
. On the *Detail* tab, click btn:[Add].
. In the *Raw Markdown* text field, enter your README text in Markdown.
. Click btn:[Save] when you are finished.
diff --git a/downstream/modules/hub/proc-add-group-to-container-repo.adoc b/downstream/modules/hub/proc-add-group-to-container-repo.adoc
index 4454281db4..2f7d7394f5 100644
--- a/downstream/modules/hub/proc-add-group-to-container-repo.adoc
+++ b/downstream/modules/hub/proc-add-group-to-container-repo.adoc
@@ -1,22 +1,22 @@

[id="providing-access-to-containers"]

-= Providing access to your container repository
+= Providing access to your {ExecEnvName}

[role="_abstract"]
-Provide access to your container repository for users who need to work with the images.
-Adding a group allows you to modify the permissions the group can have to the container repository.
-You can use this option to extend or restrict permissions based on what the group is assigned.
+Provide access to your {ExecEnvName} for users who need to work with the images.
+Adding a team allows you to modify the permissions that the team has for the container repository.
+You can use this option to extend or restrict permissions based on what the team is assigned.

.Prerequisites

* You have *change container namespace* permissions.

.Procedure
-//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ...
-. Log in to {HubName}.
+
+. Log in to {PlatformNameShort}.
. From the navigation panel, select {MenuACExecEnvironments}.
-. Select your container repository.
-. From the *Access* tab, click btn:[Select a group].
-. Select the group or groups to which you want to grant access and click btn:[Next].
+. Select your {ExecEnvNameSing}.
+. From the *Team Access* tab, click btn:[Add roles].
+. Select the team or teams to which you want to grant access and click btn:[Next].
. Select the roles that you want to add to this {ExecEnvShort} and click btn:[Next].
-. Click btn:[Add].
+. Click btn:[Finish].
diff --git a/downstream/modules/hub/proc-adding-an-execution-environment.adoc b/downstream/modules/hub/proc-adding-an-execution-environment.adoc
index eb729a3453..6b2e375cbd 100644
--- a/downstream/modules/hub/proc-adding-an-execution-environment.adoc
+++ b/downstream/modules/hub/proc-adding-an-execution-environment.adoc
@@ -1,31 +1,32 @@

[id="adding-an-execution-environment"]

-= Adding an {ExecEnvShort}
-{ExecEnvNameStart} are container images that make it possible to incorporate system-level dependencies and collection-based content.
-Each {ExecEnvShort} allows you to have a customized image to run jobs, and each of them contain only what you need when running the job.
+= Adding and signing an {ExecEnvShort}
+{ExecEnvNameStart} are container images that make it possible to incorporate system-level dependencies and collection-based content. Each {ExecEnvShort} allows you to have a customized image to run jobs, and each of them contains only what you need when running the job.

.Procedure

. From the navigation panel, select {MenuACExecEnvironments}.
-. Click btn:[Add execution environment].
+. Click btn:[Create execution environment] and enter the relevant information in the fields that appear.
-. Enter the name of the {ExecEnvShort}.
+.. The *Name* field displays the name of the {ExecEnvShort} on your local registry.

-. Optional: Enter the upstream name.
+.. The *Upstream name* field is the name of the image on the remote server.

-. Under *Registry*, select the name of the registry from the drop-down menu.
+.. Under *Registry*, select the name of the registry from the drop-down menu.

-. Enter tags in the *Add tag(s) to include* field.
+.. Optional: Enter tags in the *Add tag(s) to include* field.
If the field is blank, all the tags are passed.
-You must specify which repository specific tags to pass.
+You must specify which repository-specific tags to pass.

-. The remaining fields are optional:
-* *Currently included tags*
-* *Add tag(s) to exclude*
-* *Currently excluded tag(s)*
-* *Description*
+.. Optional: Enter tags to exclude in the *Add tag(s) to exclude* field.

-. Click btn:[Save].
+. Click btn:[Create {ExecEnvShort}]. You should see your new {ExecEnvShort} in the list that appears.

-. Synchronize the image.
+. Sync and sign your new {ExecEnvNameSing}.
+
+.. Click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Sync execution environment*.
+
+.. Click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Sign execution environment*.
+
+. Click your new {ExecEnvShort}. On the *Details* page, find the *Signed* label to verify that your {ExecEnvShort} has been signed.
diff --git a/downstream/modules/hub/proc-adding-collections-repository.adoc b/downstream/modules/hub/proc-adding-collections-repository.adoc
index ebe2430159..aed460fad9 100644
--- a/downstream/modules/hub/proc-adding-collections-repository.adoc
+++ b/downstream/modules/hub/proc-adding-collections-repository.adoc
@@ -9,7 +9,7 @@ After you create your repository, you can begin adding automation content collec

.Procedure

. From the navigation panel, select {MenuACAdminRepositories}.
-. Locate your repository in the list and click the btn:[More Actions] icon *{MoreActionsIcon}*, then select *Edit*.
-. Select the *Collections version* tab.
-. Click btn:[Add Collection] and select the collections that you want to add to your repository.
+. Click your repository in the list.
+. Select the *Collection versions* tab.
+. Click btn:[Add Collections] and select the collections that you want to add to your repository.
. Click btn:[Select].
diff --git a/downstream/modules/hub/proc-adding-containers-remotely-to-the-automation-hub.adoc b/downstream/modules/hub/proc-adding-containers-remotely-to-the-automation-hub.adoc
index d2f595e4fb..6fe595aa65 100644
--- a/downstream/modules/hub/proc-adding-containers-remotely-to-the-automation-hub.adoc
+++ b/downstream/modules/hub/proc-adding-containers-remotely-to-the-automation-hub.adoc
@@ -6,17 +6,16 @@

You can add containers remotely to {HubName} in one of the following two ways:

-* Create Remotes
-* Execution Environment
+* By creating remotes
+* By using an {ExecEnvNameSing}

.Procedure
-//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ...
-. Log in to {HubName}.
+. Log in to {PlatformNameShort}.
. From the navigation panel, select {MenuACAdminRemoteRegistries}.
-. Click btn:[Add remote registry].
+. Click btn:[Create remote registry].

* In the *Name* field, enter the name of the registry where the container resides.
@@ -26,4 +25,4 @@ You can add containers remotely to {HubName} in one of the following two ways:

* In the *Password* field, enter the password if necessary.

-* Click btn:[Save].
+* Click btn:[Create remote registry].
diff --git a/downstream/modules/hub/proc-approve-collection.adoc b/downstream/modules/hub/proc-approve-collection.adoc
index 816964c920..2659a2d675 100644
--- a/downstream/modules/hub/proc-approve-collection.adoc
+++ b/downstream/modules/hub/proc-approve-collection.adoc
@@ -4,7 +4,7 @@

= Approving collections for internal publication

-You can approve collections uploaded to individual namespaces for internal publication and use. All collections awaiting review are located under the *Approval* tab in the *Staging* repository.
+You can approve collections uploaded to individual namespaces for internal publication and use. All collections awaiting review are located in {MenuACAdminCollectionApproval}.

.Prerequisites

@@ -16,8 +16,7 @@ You can approve collections uploaded to individual namespaces for internal publi
+
Collections requiring approval have the status *Needs review*.
+
-. Select a collection to review.
-. Click the *Version* to view the contents of the collection.
-. Click btn:[Certify] to approve the collection.
+. Find the collection you want to review in the list. You can also filter collections by Namespace, Repository, and Status using the search bar.
+. Click the thumbs-up icon to approve and sign the collection. Confirm your choice in the modal that appears.

Approved collections are moved to the *Published* repository where users can view and download them for use.
diff --git a/downstream/modules/hub/proc-basic-repo-sync.adoc b/downstream/modules/hub/proc-basic-repo-sync.adoc
index b5193b9e52..d4f6f292a3 100644
--- a/downstream/modules/hub/proc-basic-repo-sync.adoc
+++ b/downstream/modules/hub/proc-basic-repo-sync.adoc
@@ -5,10 +5,10 @@

.Procedure

-//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ...
-. Log in to {HubName}.
+
+. Log in to {PlatformNameShort}.
. From the navigation panel, select {MenuACAdminRepositories}.
-. Locate your repository in the list and click *Sync*.
+. Locate your repository in the list and click the btn:[More Actions] icon *{MoreActionsIcon}*, then select *Sync repository*.
+
All collections in the configured remote are downloaded to your custom repository. To check the status of the collection sync, select {MenuACAdminTasks} from the navigation panel.
+
@@ -19,4 +19,4 @@ To limit repository synchronization to specific collections within a remote, you

[role="_additional-resources"]
.Additional resources
-For more information about using requirements files, see link:https://docs.ansible.com/ansible/latest/collections_guide/collections_installing.html#install-multiple-collections-with-a-requirements-file[Install multiple collections with a requirements file] in the _Using Ansible collections_ guide.
+For more information about using requirements files, see link:{URLHubManagingContent}/managing-cert-valid-content#create-requirements-file_managing-cert-validated-content[Creating a requirements file].
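To limit a repository sync to specific collections in the remote, you can attach a requirements file to the remote. A minimal sketch is shown below; `my_namespace.my_collection` is a placeholder name, and the version pin is only an example.

----
# requirements.yml
collections:
  - name: my_namespace.my_collection
  - name: community.general
    version: ">=7.0.0"
----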
diff --git a/downstream/modules/hub/proc-configure-ansible-galaxy-cli-verify.adoc b/downstream/modules/hub/proc-configure-ansible-galaxy-cli-verify.adoc index 7b92856c12..4c82504b77 100644 --- a/downstream/modules/hub/proc-configure-ansible-galaxy-cli-verify.adoc +++ b/downstream/modules/hub/proc-configure-ansible-galaxy-cli-verify.adoc @@ -5,7 +5,7 @@ You can configure Ansible-Galaxy CLI to verify collections. This ensures that downloaded collections are approved by your organization and have not been changed after they were uploaded to {HubName}. -If a collection has been signed by {HubName}, the server provides ASCII armored, GPG-detached signatures to verify the authenticity of `MANIFEST.json` before using it to verify the collection’s contents. +If a collection has been signed by {HubName}, the server provides ASCII armored, GPG-detached signatures to verify the authenticity of `MANIFEST.json` before using it to verify the collection's contents. You must opt into signature verification by link:https://docs.ansible.com/ansible/devel/reference_appendices/config.html#galaxy-gpg-keyring[configuring a keyring] for `ansible-galaxy` or providing the path with the `--keyring` option. .Prerequisites @@ -24,7 +24,7 @@ gpg --import --no-default-keyring --keyring ~/.ansible/pubring.kbx my-public-key + [NOTE] ==== -In addition to any signatures provided by the {HubName}, signature sources can also be provided in the requirements file and on the command line. +In addition to any signatures provided by {HubName}, signature sources can also be provided in the requirements file and on the command line. Signature sources should be URIs. ==== + @@ -61,8 +61,8 @@ Create a collection with `company_name.product` format. This format means that multiple products can have different collections under the company namespace. [discrete] -= How do I get a namespace on {HubNameMain}? += How do I get a namespace on {HubName}? -By default namespaces used on {Galaxy} are also used on {HubNameMain} by the Ansible partner team. +By default namespaces used on {Galaxy} are also used on {HubName} by the Ansible partner team. For any queries and clarifications contact ansiblepartners@redhat.com. diff --git a/downstream/modules/hub/proc-configure-automation-hub-server-gui.adoc b/downstream/modules/hub/proc-configure-automation-hub-server-gui.adoc index 954c85cdb8..2ce8d51859 100644 --- a/downstream/modules/hub/proc-configure-automation-hub-server-gui.adoc +++ b/downstream/modules/hub/proc-configure-automation-hub-server-gui.adoc @@ -16,7 +16,7 @@ Creating a new token revokes any previous tokens generated for {HubName}. Update . Navigate to your {ControllerName}. . Create a new credential. -.. Navigate to {MenuAMCredentials}. +.. Navigate to {MenuAECredentials}. .. Click btn:[Add]. .. Enter the name for your new credential in the *Name* field. .. Optional: Enter a description and enter or select the name of the organization with which the credential is associated. diff --git a/downstream/modules/hub/proc-configure-content-signing-on-pah.adoc b/downstream/modules/hub/proc-configure-content-signing-on-pah.adoc index 6297da4ff2..cf8ee56444 100644 --- a/downstream/modules/hub/proc-configure-content-signing-on-pah.adoc +++ b/downstream/modules/hub/proc-configure-content-signing-on-pah.adoc @@ -53,7 +53,6 @@ else exit $STATUS fi ---- - + After you deploy a {PrivateHubName} with signing enabled to your {PlatformNameShort} cluster, new UI additions are displayed in collections. 
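Putting the keyring configuration together, the following is a minimal sketch of client-side verification. The public key file and collection name are placeholders.

----
# Import your organization's public key into a dedicated keyring.
gpg --import --no-default-keyring --keyring ~/.ansible/pubring.kbx my-public-key.asc

# Install a collection, verifying the signature of MANIFEST.json against that keyring.
ansible-galaxy collection install my_namespace.my_collection --keyring ~/.ansible/pubring.kbx
----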
diff --git a/downstream/modules/hub/proc-configure-proxy-remote.adoc b/downstream/modules/hub/proc-configure-proxy-remote.adoc
index 9969e092b4..74313edbbb 100644
--- a/downstream/modules/hub/proc-configure-proxy-remote.adoc
+++ b/downstream/modules/hub/proc-configure-proxy-remote.adoc
@@ -11,14 +11,14 @@ If your {PrivateHubName} is behind a network proxy, you can configure proxy settings

.Prerequisites

* You have valid *Modify Ansible repo content* permissions.
-For more information on permissions, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_hub/assembly-user-access[Configuring user access for your {PrivateHubName}].
+For more information on permissions, see link:{LinkCentralAuth}.
* You have a proxy URL and credentials from your local network administrator.

.Procedure
-//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ...
-. Log in to {PrivateHubName}.
+
+. Log in to {PlatformNameShort}.
. From the navigation panel, select {MenuACAdminRemotes}.
-. In either the *rh-certified* or *Community* remote, click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Edit*.
+. In either the *rh-certified* or *Community* remote, click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Edit remote*.
. Expand the *Show advanced options* drop-down menu.
. Enter your proxy URL, proxy username, and proxy password in the appropriate fields.
-. Click btn:[Save].
+. Click btn:[Save remote].
diff --git a/downstream/modules/hub/proc-configuring-the-client-to-verify-signatures.adoc b/downstream/modules/hub/proc-configuring-the-client-to-verify-signatures.adoc
index 2a9149809a..20a554844a 100644
--- a/downstream/modules/hub/proc-configuring-the-client-to-verify-signatures.adoc
+++ b/downstream/modules/hub/proc-configuring-the-client-to-verify-signatures.adoc
@@ -3,7 +3,7 @@

= Configuring the client to verify signatures

-To ensure a container image pulled from the remote registry is properly signed, you must first configure the image with the proper public key in a policy file.
+To ensure an {ExecEnvShort} pulled from the remote registry is properly signed, you must first configure the client with the proper public key in a policy file.

.Prerequisites
* The client must have sudo privileges configured to verify signatures.

@@ -75,7 +75,7 @@ name of your key file.

----
> podman pull <registry>/<image>:<tag> --tls-verify=false
----

-This response verifies the image has been signed with no errors. If the image is not signed, the command fails.
+This response verifies the {ExecEnvShort} has been signed with no errors. If the {ExecEnvShort} is not signed, the command fails.

.Additional resources
* For more information about policy.json, see link:https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#signedby[documentation for containers-policy.json].
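As a sketch of what a `signedBy` entry in the policy file can look like, the registry hostname and key path below are placeholders; see the containers-policy.json documentation linked above for the full schema.

----
{
  "default": [{"type": "insecureAcceptAnything"}],
  "transports": {
    "docker": {
      "hub.example.com": [
        {
          "type": "signedBy",
          "keyType": "GPGKeys",
          "keyPath": "/etc/pki/hub-signing-key.gpg"
        }
      ]
    }
  }
}
----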
\ No newline at end of file
diff --git a/downstream/modules/hub/proc-create-api-token-pah.adoc b/downstream/modules/hub/proc-create-api-token-pah.adoc
index 5fefff2d10..ef67132ce3 100644
--- a/downstream/modules/hub/proc-create-api-token-pah.adoc
+++ b/downstream/modules/hub/proc-create-api-token-pah.adoc
@@ -1,7 +1,7 @@

// Module included in the following assemblies:
// obtaining-token/master.adoc

-[id="proc-create-api-token-pah"]
-= Creating the API token in {PrivateHubName}
+[id="proc-create-api-token-pah_{context}"]
+== Creating the API token in {PrivateHubName}

In {PrivateHubName}, you can create an API token using API token management. The API token is a secret token used to protect your content.
diff --git a/downstream/modules/hub/proc-create-api-token.adoc b/downstream/modules/hub/proc-create-api-token.adoc
index c7b450b3a9..2d44eede67 100644
--- a/downstream/modules/hub/proc-create-api-token.adoc
+++ b/downstream/modules/hub/proc-create-api-token.adoc
@@ -1,26 +1,26 @@

// Module included in the following assemblies:
// obtaining-token/master.adoc

-[id="proc-create-api-token"]
-= Creating the API token in {HubName}
+[id="proc-create-api-token_{context}"]
+== Creating the offline token in {HubName}

-In {HubName}, you can create an API token by using *Token management*. The API token is a secret token used to protect your content.
+In {HubName}, you can create an offline token by using *Token management*. The offline token is a secret token used to protect your content.

.Procedure

. Navigate to link:https://console.redhat.com/ansible/automation-hub/token/[{PlatformNameShort} on the Red Hat Hybrid Cloud Console].
. From the navigation panel, select menu:Automation Hub[Connect to Hub].
. Under *Offline token*, click btn:[Load Token].
-. Click the btn:[Copy to clipboard] icon to copy the API token.
+. Click the btn:[Copy to clipboard] icon to copy the offline token.
. Paste the offline token into a file and store it in a secure location.

[IMPORTANT]
====
-The API token is a secret token used to protect your content. Store your API token in a secure location.
+The offline token is a secret token used to protect your content. Store your token in a secure location.
====

-The API token is now available for configuring {HubName} as your default collections server or for uploading collections by using the `ansible-galaxy` command line tool.
+The offline token is now available for configuring {HubName} as your default collections server or for uploading collections by using the `ansible-galaxy` command line tool.

[NOTE]
====
-The API token does not expire.
+Your offline token expires after 30 days of inactivity. For more on obtaining a new offline token, see xref:con-offline-token-active_cloud-sync[Keeping your offline token active].
====
\ No newline at end of file
diff --git a/downstream/modules/hub/proc-create-content-developers.adoc b/downstream/modules/hub/proc-create-content-developers.adoc
index b17a2cb1b3..086c1bbbcc 100644
--- a/downstream/modules/hub/proc-create-content-developers.adoc
+++ b/downstream/modules/hub/proc-create-content-developers.adoc
@@ -1,26 +1,34 @@

[id="proc-create-content-developers"]

-= Creating a new group for content curators
+= Creating a new team for content curators

-You can create a new group in {PrivateHubName} designed to support content curation in your organization. This group can contribute internally developed collections for publication in {PrivateHubName}.
+You can create a new team in {PlatformNameShort} designed to support content curation in your organization. This team can contribute internally-developed collections for publication in {PrivateHubName}. -To help content developers create a namespace and upload their internally developed collections to {PrivateHubName}, you must first create and edit a group and assign the required permissions. +To help content developers create a namespace and upload their internally developed collections to {PrivateHubName}, you must first create and edit a team and assign the required permissions. .Prerequisites -* You have administrative permissions in {PrivateHubName} and can create groups. +* You have administrative permissions in {PlatformNameShort} and can create teams. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to your {PrivateHubName}. -. From the navigation panel, select {MenuHubGroups} and click btn:[Create]. -. Enter *Content Engineering* as a *Name* for the group in the modal and click btn:[Create]. You have created the new group and the *Groups* page opens. -. On the *Permissions* tab, click btn:[Edit]. -. Under *Namespaces*, add permissions for *Add Namespace*, *Upload to Namespace*, and *Change Namespace*. -. Click btn:[Save]. + +. Log in to your {PlatformNameShort}. +. From the navigation panel, select {MenuAMTeams} and click btn:[Create team]. +. Enter *Content Engineering* as a *Name* for the team. +. Select an *Organization* for the team. +. Click btn:[Create team]. You have created the new team and the team Details page opens. +. Select the *Roles* tab and then select the *Automation Content* tab. +. Click btn:[Add roles]. +. Select *Namespace* from the *Resource type* list and click btn:[Next]. +. Select the namespaces that will receive the new roles and click btn:[Next]. +. Select the roles to apply to the selected namespaces and click btn:[Next]. +. Review your selections and click btn:[Finish]. +. Click btn:[Close] to complete the process. + -The new group is created with the permissions that you assigned. You can then add users to the group. +The new team is created with the permissions that you assigned. You can then add users to the team. + -. Click the *Users* tab on the *Groups* page. -. Click btn:[Add]. -. Select users and click btn:[Add]. +. Click the *Users* tab on the *Teams* page. +. Click btn:[Add users]. +. Select users and click btn:[Add users]. + +For further instructions on managing access with teams, see link:{URLCentralAuth}/gw-managing-access#assembly-controller-teams_gw-manage-rbac[Teams] in the {TitleCentralAuth} guide. \ No newline at end of file diff --git a/downstream/modules/hub/proc-create-credential.adoc b/downstream/modules/hub/proc-create-credential.adoc index 53aa31498b..96c682df39 100644 --- a/downstream/modules/hub/proc-create-credential.adoc +++ b/downstream/modules/hub/proc-create-credential.adoc @@ -1,25 +1,25 @@ [id="proc-create-credential"] -= Creating a credential in {ControllerName} += Creating a credential -To pull container images from a password or token-protected registry, you must create a credential in {ControllerName}. +To pull {ExecEnvName} images from a password or token-protected registry, you must create a credential. In earlier versions of {PlatformNameShort}, you were required to deploy a registry to store {ExecEnvShort} images. 
-On {PlatformNameShort} 2.0 and later, the system operates as if you already have a container registry up and running. -To store {ExecEnvShort} images, add the credentials of only your selected container registries. +On {PlatformNameShort} 2.0 and later, the system operates as if you already have a remote registry up and running. +To store {ExecEnvShort} images, add the credentials of only your selected remote registries. .Procedure -// For 2.5 this will be Log in to Ansible Automation Platform. From the navigation panel select Access Management > Credentials. Select the Automation Execution tab -. Navigate to {ControllerName}. -. From the navigation panel, select {MenuAMCredentials}. -. Click btn:[Add] to create a new credential. + +. Log in to {PlatformNameShort}. +. From the navigation panel, select {MenuAECredentials}. +. Click btn:[Create credential] to create a new credential. . Enter an authorization *Name*, *Description*, and *Organization*. -. Select the *Credential Type*. -. Enter the *Authentication URL*. This is the container registry address. -. Enter the *Username* and *Password or Token* required to log in to the container registry. +. In the *Credential Type* drop-down, select *Container Registry*. +. Enter the *Authentication URL*. This is the remote registry address. +. Enter the *Username* and *Password or Token* required to log in to the remote registry. . Optional: To enable SSL verification, select *Verify SSL*. -. Click btn:[Save]. +. Click btn:[Create credential]. -Filling in at least one of the fields organization, user, or team is mandatory, and can be done through the user interface +Filling in at least one of the fields organization, user, or team is mandatory, and can be done through the user interface. //[dcd-This should be replaced with a link; otherwise, it's not helpful]For more information, please reference the Pulling from Protected Registries section of the Execution Environment documentation. diff --git a/downstream/modules/hub/proc-create-groups.adoc b/downstream/modules/hub/proc-create-groups.adoc index 352abdfce1..336d2211d6 100644 --- a/downstream/modules/hub/proc-create-groups.adoc +++ b/downstream/modules/hub/proc-create-groups.adoc @@ -2,9 +2,9 @@ // obtaining-token/master.adoc [id="proc-create-group"] -= Creating a new group in {PrivateHubName} += Creating a new team in {PrivateHubName} -You can create and assign permissions to a group in {PrivateHubName} that enables users to access specified features in the system. -By default, the *Admin* group in the {HubName} has all permissions assigned and is available on initial login. Use the credentials created when installing {PrivateHubName}. +You can create and assign permissions to a team in {PrivateHubName} that enables users to access specified features in the system. +By default, new teams do not have any assigned permissions. You can add permissions when first creating a team or edit an existing team to add or remove permissions. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/getting_started_with_automation_hub/index#proc-create-group[Creating a new group in {PrivateHubName}] in the Getting started with {HubName} guide. +For more information, see link:{URLCentralAuth}/gw-managing-access#assembly-controller-teams_gw-manage-rbac[Teams] in the {TitleCentralAuth} guide. 
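The credential procedure in the hunk above takes the *Authentication URL*, *Username*, and *Password or Token* on faith. As an optional sanity check before saving the credential (with `registry.redhat.io` standing in for whatever remote registry address you plan to use):

[source,bash]
----
# Confirm that the username and password or token actually authenticate
# against the remote registry before entering them in the credential form.
podman login registry.redhat.io
# Podman prompts for Username and Password; "Login Succeeded!" confirms the
# values are safe to store in the new Container Registry credential.
----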
diff --git a/downstream/modules/hub/proc-create-namespace.adoc b/downstream/modules/hub/proc-create-namespace.adoc index f101a287f0..a9635119f3 100644 --- a/downstream/modules/hub/proc-create-namespace.adoc +++ b/downstream/modules/hub/proc-create-namespace.adoc @@ -3,18 +3,23 @@ = Creating a namespace You can create a namespace to organize collections that your content developers upload to {HubName}. -When creating a namespace, you can assign a group in {HubName} as owners of that namespace. +When creating a namespace, you can assign a team in {HubName} as owners of that namespace. .Prerequisites * You have *Add Namespaces* and *Upload to Namespaces* permissions. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to your {PrivateHubName}. + +. Log in to your {PlatformNameShort}. . From the navigation panel, select {MenuACNamespaces}. -. Click btn:[Create] and enter a *namespace name*. -. Assign a group of *Namespace owners*. -. Click btn:[Create]. +. Click btn:[Create namespace] and enter a *Name* for your namespace. +. Optional: enter a description, company, logo URL, resources, or useful links in the appropriate fields. +. Click btn:[Create namespace]. +. Select the *Team Access* tab and click btn:[Add roles] to assign roles to your namespace. +. Select the team to which you want to grant a role, then click btn:[Next]. +. Select the roles you want to apply to the selected team, and then click btn:[Next]. +. Review your selections and click btn:[Finish]. +. Click btn:[Close] to complete the process. -Your content developers can now upload collections to your new namespace and allow users in groups assigned as owners to upload collections. +Your content developers can now upload collections to your new namespace and allow users in teams assigned as owners to upload collections. diff --git a/downstream/modules/hub/proc-create-remote.adoc b/downstream/modules/hub/proc-create-remote.adoc index 2629860411..9569e1b46a 100644 --- a/downstream/modules/hub/proc-create-remote.adoc +++ b/downstream/modules/hub/proc-create-remote.adoc @@ -8,16 +8,16 @@ You can use {PlatformName} to create a remote configuration to an external collection source. Then, you can sync the content from those collections to your custom repositories. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to {HubName}. + +. Log in to {PlatformNameShort}. . From the navigation panel, select {MenuACAdminRemotes}. -. Click btn:[Add Remote]. +. Click btn:[Create Remote]. . Enter a *Name* for the remote configuration. . Enter the *URL* for the remote server, including the path for the specific repository. + [NOTE] ==== -To find the remote server URL and repository path, navigate to {MenuACAdminRepositories}, select your repository, and click btn:[Copy CLI configuration]. +To find the remote server URL and repository path, navigate to {MenuACAdminRepositories}, select the btn:[More Actions] icon *{MoreActionsIcon}*, and select btn:[Copy CLI configuration]. ==== + . Configure the credentials to the remote server by entering a *Token* or *Username* and *Password* required to access the external collection. @@ -28,7 +28,7 @@ To generate a token from the navigation panel, select {MenuACAPIToken}, click bt ==== + . 
To access collections from {Console}, enter the *SSO URL* to sign in to the identity provider (IdP). -. Select or create a *YAML requirements* file to identify the collections and version ranges to synchronize with your custom repository. For example, to download only the kubernetes and AWS collection versions 5.0.0 or later the requirements file would look like this: +. Select or create a *Requirements file* to identify the collections and version ranges to synchronize with your custom repository. For example, to download only the kubernetes and AWS collection versions 5.0.0 or later the requirements file would look like this: + ----- Collections: @@ -42,7 +42,7 @@ Collections: All collection dependencies are downloaded during the Sync process. ==== + -. Optional: To configure your remote further, use the options available under *Advanced configuration*: +. Optional: To configure your remote further, use the options available under *Show advanced options*: .. If there is a corporate proxy in place for your organization, enter a *Proxy URL*, *Proxy Username* and *Proxy Password*. .. Enable or disable transport layer security using the *TLS validation* checkbox. .. If digital certificates are required for authentication, enter a *Client key* and *Client certificate*. diff --git a/downstream/modules/hub/proc-create-repository.adoc b/downstream/modules/hub/proc-create-repository.adoc index 1bb21abc7e..04fc8cb8a3 100644 --- a/downstream/modules/hub/proc-create-repository.adoc +++ b/downstream/modules/hub/proc-create-repository.adoc @@ -8,13 +8,13 @@ When you use {PlatformName} to create a repository, you can configure the repository to be private or hide it from search results. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to {HubName}. + +. Log in to {PlatformNameShort}. . From the navigation panel, select {MenuACAdminRepositories}. -. Click btn:[Add repository]. -. Enter a *Repository name*. +. Click btn:[Create repository]. +. Enter a *Name* for your repository. . In the *Description* field, describe the purpose of the repository. -. To retain previous versions of your repository each time you make a change, select *Retained number of versions*. The number of retained versions can range anywhere between 0 and unlimited. To save all versions, leave this set to null. +. To retain previous versions of your repository each time you make a change, enter a figure in the field labeled *Retained number of versions*. The number of retained versions can range anywhere between 0 and unlimited. To save all versions, leave this set to null. + [NOTE] ==== @@ -27,10 +27,10 @@ Staging:: Anyone is allowed to publish automation content into the repository. Approved:: Collections added to this repository are required to go through the approval process by way of the staging repository. When auto approve is enabled, any collection uploaded to a staging repository is automatically promoted to all of the approved repositories. None:: Any user with permissions on the repository can publish to the repository directly, and the repository is not part of the approval pipeline. + -. Optional: To hide the repository from search results, select *Hide from search*. This option is selected by default. +. Optional: To hide the repository from search results, select *Hide from search*. . Optional: To make the repository private, select *Make private*. 
This hides the repository from anyone who does not have permissions to view the repository. -. To sync the content from a remote repository into this repository, select *Remote* and select the remote that contains the collections you want included in your custom repository. For more information, see xref:proc-basic-repo-sync[Repository sync]. -. Click btn:[Save]. +. To sync the content from a remote repository into this repository, in the *Remote* field select the remote that contains the collections you want included in your custom repository. For more information, see xref:proc-basic-repo-sync[Repository sync]. +. Click btn:[Create repository]. [role="_additional-resources"] .Next steps diff --git a/downstream/modules/hub/proc-create-requirements-file.adoc b/downstream/modules/hub/proc-create-requirements-file.adoc new file mode 100644 index 0000000000..2dd6a09b65 --- /dev/null +++ b/downstream/modules/hub/proc-create-requirements-file.adoc @@ -0,0 +1,42 @@ +:_newdoc-version: 2.18.3 +:_template-generated: 2024-09-18 +:_mod-docs-content-type: PROCEDURE + +[id="create-requirements-file_{context}"] += Creating a requirements file + +Use a requirements file to add collections to your {HubName}. Requirements files are in YAML format and list the collections that you want to install in your {HubName}. After you create your `requirements.yml` file, run the install command to add the collections to your {HubName} instance. + +A standard `requirements.yml` file contains the following parameters: + +* `name`: the name of the collection, formatted as `<namespace>.<collection_name>` +* `version`: the collection version number + +.Procedure + +. Create your requirements file. ++ +In YAML format, collection information in your requirements file should look like this: ++ +[source,yaml] +---- +collections: + - name: namespace.collection_name + version: 1.0.0 +---- ++ +. After you have created your requirements file listing information for each collection that you want to install, navigate to the directory where the file is located and run the following command: + +[source,bash] +---- +$ ansible-galaxy collection install -r requirements.yml +---- + +== Installing an individual collection from the command line + +To install an individual collection to your {HubName}, run the following command: + +[source,bash] +---- +$ ansible-galaxy collection install namespace.collection_name +---- diff --git a/downstream/modules/hub/proc-create-synclist.adoc b/downstream/modules/hub/proc-create-synclist.adoc index 5239b778db..23976bf9dc 100644 --- a/downstream/modules/hub/proc-create-synclist.adoc +++ b/downstream/modules/hub/proc-create-synclist.adoc @@ -13,7 +13,7 @@ All {CertifiedName} are included by default in your initial organization synclis .Prerequisites * You have a valid {PlatformNameShort} subscription. -* You have Organization Administrator permissions for {Console}. +* You have organization administrator permissions for {Console}. * The following domain names are part of either the firewall or the proxy's allowlist. They are required for successful connection and download of collections from {HubName} or Galaxy server: ** `galaxy.ansible.com` @@ -27,10 +27,16 @@ The following domain names must be in the allow list: * SSL inspection is disabled either when using self signed certificates or for the Red Hat domains. .Procedure -// ddacosta I don't know if a change will be needed here for Gateway as this is referring to the Console version of Hub. Will console pull in nav changes?
Also, there is no repositories selection on the console version right now. + . Log in to `{Console}`. . Navigate to menu:Automation Hub[Collections]. -. Set the toggle switch on each collection to exclude or include it on your synclist. -. To initiate the remote repository synchronization, navigate to {HubName} and select {MenuACAdminRepositories}. -. Click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Sync* to initiate the remote repository synchronization to your {PrivateHubName}. +. Set the *Sync* toggle switch on each collection to exclude or include it on your synclist. ++ +[NOTE] +==== +You will only see the *Sync* toggle switch if you have administrator permissions. +==== ++ +. To initiate the remote repository synchronization, navigate to your {PlatformNameShort} and select {MenuACAdminRepositories}. +. In the row containing the repository you want to sync, click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Sync repository* to initiate the remote repository synchronization to your {PrivateHubName}. . Optional: If your remote repository is already configured, update the collections content that you made available to local users by manually synchronizing Red Hat {CertifiedName} to your {PrivateHubName}. diff --git a/downstream/modules/hub/proc-delete-namespace.adoc b/downstream/modules/hub/proc-delete-namespace.adoc index 2498bcbd06..f47e79888d 100644 --- a/downstream/modules/hub/proc-delete-namespace.adoc +++ b/downstream/modules/hub/proc-delete-namespace.adoc @@ -5,19 +5,24 @@ = Deleting a namespace You can delete unwanted namespaces to manage storage on your {HubName} server. -You must first ensure that the namespace does not contain a collection with dependencies. +You must first ensure that the namespace you want to delete does not contain a collection with dependencies. .Prerequisites * The namespace you are deleting does not have a collection with dependencies. * You have *Delete namespace* permissions. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to your {PrivateHubName}. + +. Log in to your {PlatformNameShort}. . From the navigation panel, select {MenuACNamespaces}. . Click the namespace to be deleted. . Click the btn:[More Actions] icon *{MoreActionsIcon}*, then click btn:[Delete namespace]. + -NOTE: If the btn:[Delete namespace] button is disabled, the namespace contains a collection with dependencies. Review the collections in this namespace, and delete any dependencies. See link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_hub/uploading-content-hub#delete-collection[Deleting a collection on automation hub] for information. +[NOTE] +==== +If the btn:[Delete namespace] button is disabled, the namespace contains a collection with dependencies. Review the collections in this namespace, and delete any dependencies. +==== + +// hherbly: LINK NEEDS UPDATE See link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_hub/uploading-content-hub#delete-collection[Deleting a collection on automation hub] for information. The namespace that you deleted, as well as its associated collections, is now deleted and removed from the namespace list view. 
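The synclist prerequisites earlier in this hunk require `galaxy.ansible.com` (plus further domains elided by the hunk boundary) to be on the firewall or proxy allowlist. A rough reachability check, using only the domain visible above:

[source,bash]
----
# Substitute or extend the list with the remaining allowlisted domains
# from your own firewall or proxy configuration.
for host in galaxy.ansible.com; do
  curl -sSI "https://${host}" | head -n 1
done
----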
diff --git a/downstream/modules/hub/proc-deploying-your-system-for-container-signing.adoc b/downstream/modules/hub/proc-deploying-your-system-for-container-signing.adoc index bfb33d4014..c62088fc5d 100644 --- a/downstream/modules/hub/proc-deploying-your-system-for-container-signing.adoc +++ b/downstream/modules/hub/proc-deploying-your-system-for-container-signing.adoc @@ -3,9 +3,11 @@ = Deploying your system for container signing -{HubNameStart} implements image signing to offer better security for the {ExecEnvShort} container images. -To deploy your system so that it is ready for container signing, create a signing script. +To deploy your system so that it is ready for container signing, first ensure that you have +link:{URLContainerizedInstall}/aap-containerized-installation#enabling-automation-hub-collection-and-container-signing_aap-containerized-installation[enabled automation content collection and container signing]. +Then you can create a signing script, or +link:{URLHubManagingContent}/managing-containers-hub#adding-an-execution-environment[add and sign an {ExecEnvShort}] manually. [NOTE] ==== @@ -58,10 +60,8 @@ automationhub_container_signing_service_key = /absolute/path/to/key/to/sign automationhub_container_signing_service_script = /absolute/path/to/script/that/signs ----- + -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Once installation is complete, navigate to your {HubName}. -. From the navigation panel, select {MenuACAdminSignatureKeys}. +. Once installation is complete, log in to {PlatformNameShort} and navigate to {MenuACAdminSignatureKeys}. . Ensure that you have a key titled *container-default*, or *container*-_anyname_. diff --git a/downstream/modules/hub/proc-downloading-signature-public-keys.adoc b/downstream/modules/hub/proc-downloading-signature-public-keys.adoc index 08cfdc1e0a..f0dbfa1103 100644 --- a/downstream/modules/hub/proc-downloading-signature-public-keys.adoc +++ b/downstream/modules/hub/proc-downloading-signature-public-keys.adoc @@ -4,12 +4,12 @@ = Downloading signature public keys -After you sign and approve collections, download the signature public keys from the {HubName} UI. +After you sign and approve collections, download the signature public keys from the {PlatformNameShort} UI. You must download the public key before you add it to the local system keyring. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to your {HubName}. + +. Log in to {PlatformNameShort}. . From the navigation panel, select {MenuACAdminSignatureKeys}. The Signature Keys dashboard displays a list of multiple keys: collections and container images. @@ -19,8 +19,7 @@ The Signature Keys dashboard displays a list of multiple keys: collections and c . Choose one of the following methods to download your public key: -* Select the menu icon and click btn:[Download Key] to download the public key. -* Select the public key from the list and click the _Copy to clipboard_ icon. -* Click the drop-down menu under the *_Public Key_* tab and copy the entire public key block. +* Click the btn:[Download Key] icon to download the public key. +* Click the btn:[Copy to clipboard] next to the public key you want to copy. Use the public key that you copied to verify the content collection that you are installing. 
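One way to use the downloaded public key is to feed it to a keyring that `ansible-galaxy` checks at install time. A minimal sketch, assuming the key was saved as `pubkey.asc` and using an illustrative collection name:

[source,bash]
----
# Import the downloaded key into a dedicated keyring (file names assumed).
gpg --no-default-keyring --keyring ~/.ansible/pubring.kbx --import pubkey.asc

# Ask ansible-galaxy to verify collection signatures against that keyring.
ansible-galaxy collection install my_namespace.my_collection \
  --keyring ~/.ansible/pubring.kbx
----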
diff --git a/downstream/modules/hub/proc-edit-namespace.adoc b/downstream/modules/hub/proc-edit-namespace.adoc index f5b05842a6..fc08cbb18f 100644 --- a/downstream/modules/hub/proc-edit-namespace.adoc +++ b/downstream/modules/hub/proc-edit-namespace.adoc @@ -4,19 +4,20 @@ = Adding additional information and resources to a namespace -You can add information and provide resources for your users to accompany collections included in the namespace. Add a logo and a description, and link users to your GitHub repository, issue tracker, or other online assets. You can also enter markdown text in the *Edit resources* tab to include more information. This is helpful to users who use your collection in their automation tasks. +You can add information and provide resources for your users to accompany collections included in the namespace. For example, you can add a logo and a description, and link users to your GitHub repository, issue tracker, or other online assets. You can also enter markdown text in the *Resources* field to include more information. This is helpful to users who use your collection in their automation tasks. .Prerequisites * You have *Change Namespaces* permissions. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to your {PrivateHubName}. + +. Log in to {PlatformNameShort}. . From the navigation panel, select {MenuACNamespaces}. -. Click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Edit namespace*. -. In the *Edit details* tab, enter information in the fields. -. Click the *Edit resources* tab to enter markdown in the text field. -. Click btn:[Save]. +. Select the namespace you want to edit. +. Click the btn:[Edit namespace]. +. Enter the relevant information in the fields. +. Optional: enter markdown information in the *Resources* field. +. Click btn:[Save namespace]. -Your content developers can now upload collections to your new namespace, or allow users in groups assigned as owners to upload collections. +Your content developers can now upload collections to your new namespace, or allow users in teams assigned as owners to upload collections. diff --git a/downstream/modules/hub/proc-export-collection.adoc b/downstream/modules/hub/proc-export-collection.adoc index e4b8dfd37d..8b2a027a95 100644 --- a/downstream/modules/hub/proc-export-collection.adoc +++ b/downstream/modules/hub/proc-export-collection.adoc @@ -8,8 +8,8 @@ After collections are finalized, you can import them to a location where they can be distributed to others across your organization. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to {PrivateHubName}. + +. Log in to {PlatformNameShort}. . From the navigation panel, select {MenuACCollections}. The *Collections* page displays all collections across all repositories. You can search for a specific collection. -. Select the collection that you want to export. The collection details page opens. +. Click into the collection that you want to export. The collection details page opens. . From the *Install* tab, select *Download tarball*. The .tar file is downloaded to your default browser downloads folder. You can now import it to the location of your choosing. 
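For collections served by a configured galaxy server, the *Download tarball* step also has a command-line equivalent; a sketch with an illustrative collection name:

[source,bash]
----
# Download the collection (plus its dependencies) as tarballs into
# ./exported, along with a requirements.yml describing them.
ansible-galaxy collection download my_namespace.my_collection -p ./exported
----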
diff --git a/downstream/modules/hub/proc-import-collection.adoc b/downstream/modules/hub/proc-import-collection.adoc index b3891d5b00..ffe5464a03 100644 --- a/downstream/modules/hub/proc-import-collection.adoc +++ b/downstream/modules/hub/proc-import-collection.adoc @@ -8,15 +8,17 @@ As an automation content creator, you can import a collection to use in a custom repository. To use a collection in your custom repository, you must first import the collection into your namespace so the {HubName} administrator can approve it. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to {HubName}. + +. Log in to {PlatformNameShort}. . From the navigation panel, select {MenuACNamespaces}. The *Namespaces* page displays all of the namespaces available. -. Click btn:[View Collections]. +. Select the namespace to which you want to add your collection. +. Select the *Collections* tab. . Click btn:[Upload Collection]. -. Navigate to the collection tarball file, select the file and click btn:[Open]. -. Click btn:[Upload]. +. Enter or browse to select a collection file. +. Select the repository pipeline to add the collection. The choices are *Staging repos* and *Repositories without pipeline*. +. Click btn:[Upload collection]. + -The *My Imports* screen displays a summary of tests and notifies you if the collection upload is successful or has failed. +The *Imports* screen displays a summary of tests and notifies you if the collection upload is successful or has failed. To find your imports, on your namespace click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Imports*. + [NOTE] ==== diff --git a/downstream/modules/hub/proc-obtain-images.adoc b/downstream/modules/hub/proc-obtain-images.adoc index 56e3593dc4..d5aa7ac6aa 100644 --- a/downstream/modules/hub/proc-obtain-images.adoc +++ b/downstream/modules/hub/proc-obtain-images.adoc @@ -3,13 +3,14 @@ [id="obtain-images"] -= Pulling images for use in {HubName} += Pulling {ExecEnvShort}s for use in {HubName} [role="_abstract"] -Before you can push container images to your {PrivateHubName}, you must first pull them from an existing registry and tag them for use. The following example details how to pull an image from the Red Hat Ecosystem Catalog (registry.redhat.io). +Before you can push {ExecEnvShort}s to your {PrivateHubName}, you must first pull them from an existing registry and tag them for use. The following example details how to pull an {ExecEnvShort} from the Red Hat Ecosystem Catalog (registry.redhat.io). .Prerequisites -You have permissions to pull images from registry.redhat.io. + +* You have permissions to pull {ExecEnvName} from registry.redhat.io. .Procedure @@ -20,17 +21,16 @@ $ podman login registry.redhat.io ----- + . Enter your username and password. -. Pull a container image: +. Pull an {ExecEnvShort}: + [subs="+quotes"] ----- -$ podman pull registry.redhat.io/____:____ +$ podman pull registry.redhat.io/____:____ ----- - .Verification -To verify that the image you recently pulled is contained in the list, take these steps: +To verify that the {ExecEnvShort} you recently pulled is contained in the list, take these steps: . List the images in local storage: + @@ -38,9 +38,10 @@ To verify that the image you recently pulled is contained in the list, take thes $ podman images ----- + -. Check the image name, and verify that the tag is correct. +. 
Check the {ExecEnvShort} name, and verify that the tag is correct. [role="_additional-resources"] .Additional resources -* See link:https://redhat-connect.gitbook.io/catalog-help/[Red Hat Ecosystem Catalog Help] for information on registering and getting images. +* See link:https://redhat-connect.gitbook.io/catalog-help/[Red Hat Ecosystem Catalog Help] for information on registering and getting {ExecEnvShort}s. + diff --git a/downstream/modules/hub/proc-obtaining-org-collection-url.adoc b/downstream/modules/hub/proc-obtaining-org-collection-url.adoc index 7708a81ece..24b36ef28d 100644 --- a/downstream/modules/hub/proc-obtaining-org-collection-url.adoc +++ b/downstream/modules/hub/proc-obtaining-org-collection-url.adoc @@ -1,4 +1,4 @@ -[id="proc-create-api-token"] +[id="retrieve-api-token_{context}"] = Retrieving the API token for your Red Hat Certified Collection You can synchronize {CertifiedName} curated by your organization from `{Console}` to your {PrivateHubName}. diff --git a/downstream/modules/hub/proc-provide-remote-access.adoc b/downstream/modules/hub/proc-provide-remote-access.adoc index 0677ef0640..e0391ed25e 100644 --- a/downstream/modules/hub/proc-provide-remote-access.adoc +++ b/downstream/modules/hub/proc-provide-remote-access.adoc @@ -8,11 +8,12 @@ After you create a remote configuration, you must provide access to it before anyone can use it. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to {PrivateHubName}. + +. Log in to {PlatformNameShort}. . From the navigation panel, select {MenuACAdminRemotes}. -. Locate your repository in the list, click the btn:[More Actions] icon *{MoreActionsIcon}*, and select *Edit*. -. Select the *Access* tab. -. Select a group for *Repository owners*. See link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_hub/assembly-user-access[Configuring user access for your {PrivateHubName}] for information about implementing user access. -. Select the appropriate roles for the selected group. -. Click btn:[Save]. +. Click into your repository in the list, and then select the *Team Access* tab. +. Click btn:[Add roles]. +. Select the team to which you want to grant a role, then click btn:[Next]. +. Select the roles you want to apply to the selected team, and then click btn:[Next]. +. Review your selections and click btn:[Finish]. +. Click btn:[Close] to complete the process. diff --git a/downstream/modules/hub/proc-provide-repository-access.adoc b/downstream/modules/hub/proc-provide-repository-access.adoc index d69c7ae3f1..9348be5ac0 100644 --- a/downstream/modules/hub/proc-provide-repository-access.adoc +++ b/downstream/modules/hub/proc-provide-repository-access.adoc @@ -8,14 +8,15 @@ By default, private repositories and the automation content collections are hidden from all users in the system. Public repositories can be viewed by all users, but cannot be modified. Use this procedure to provide access to your custom repository. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... + . Log in to {PrivateHubName}. . From the navigation panel, select {MenuACAdminRepositories}. -. Locate your repository in the list and click the btn:[More Actions] icon *{MoreActionsIcon}*, then select *Edit*. -. Select the *Access* tab. -. 
Select a group for *Repository owners*. -+ -See link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_hub/assembly-user-access[Configuring user access for your {PrivateHubName}] for information about implementing user access. -+ -. Select the roles you want assigned to the selected group. -. Click btn:[Save]. +. Click into your repository in the list and select the *Team Access* tab. +. Click btn:[Add roles]. +. Select the team to which you want to grant a role, then click btn:[Next]. +. Select the roles you want to apply to the selected team, and then click btn:[Next]. +. Review your selections and click btn:[Finish]. +. Click btn:[Close] to complete the process. + +See link:{LinkCentralAuth} for more information about implementing user access. + diff --git a/downstream/modules/hub/proc-pull-image.adoc b/downstream/modules/hub/proc-pull-image.adoc index 80bd9c4509..33d32aa173 100644 --- a/downstream/modules/hub/proc-pull-image.adoc +++ b/downstream/modules/hub/proc-pull-image.adoc @@ -4,7 +4,7 @@ [role="_abstract"] -You can pull images from the {HubName} container registry to make a copy to your local machine. +You can pull {ExecEnvName} from the {HubName} remote registry to make a copy to your local machine. .Prerequisites @@ -12,9 +12,9 @@ You can pull images from the {HubName} container registry to make a copy to your .Procedure -. If you are pulling container images from a password or token-protected registry, xref:proc-create-credential[create a credential in {ControllerName}] before pulling the image. +. If you are pulling {ExecEnvName} from a password or token-protected registry, xref:proc-create-credential[create a credential] before pulling the {ExecEnvName}. . From the navigation panel, select {MenuACExecEnvironments}. -. Select your container repository. +. Select your {ExecEnvName}. . In the *Pull this image* entry, click btn:[Copy to clipboard]. . Paste and run the command in your terminal. diff --git a/downstream/modules/hub/proc-push-container.adoc b/downstream/modules/hub/proc-push-container.adoc index 8ed64ec309..77c9d93a27 100644 --- a/downstream/modules/hub/proc-push-container.adoc +++ b/downstream/modules/hub/proc-push-container.adoc @@ -3,11 +3,11 @@ [id="push-containers"] -= Pushing a container image to {PrivateHubName} += Pushing an {ExecEnvShort} to {PrivateHubName} [role="_abstract"] -You can push tagged container images to {PrivateHubName} to create new containers and populate the container registry. +You can push tagged {ExecEnvShort}s to {PrivateHubName} to create new containers and populate the remote registry. .Prerequisites @@ -28,11 +28,11 @@ $ podman login -u=____ -p=____ ____ Let Podman prompt you for your password when you log in. Entering your password at the same time as your username can expose your password to the shell history. ==== + -. Push your container image to your {HubName} container registry: +. Push your {ExecEnvShort} to your {HubName} remote registry: + [subs="+quotes"] ----- -$ podman push ____/____ +$ podman push ____/____ ----- .Troubleshooting @@ -42,8 +42,9 @@ This may lead to image-layer digest changes and a failed push operation, resulti .Verification -. Log in to your {HubName}. -//[ddacosta] I see no such selection. Should this be changed to Execution Environments > Remote Registries? If so, replace with {MenuACAdminRemoteRegistries} -. Navigate to menu:Container Registry[]. +. Log in to your {PlatformNameShort}. +//[ddacosta] I see no such selection. 
Should this be changed to Execution Environments > Remote Registries? If so, replace with {MenuACAdminRemoteRegistries} +// [hherbly] I think it's {MenuACExecEnvironments} based on the context but need to double check. +. Navigate to {MenuACExecEnvironments}. . Locate the container in the container repository list. diff --git a/downstream/modules/hub/proc-pushing-container-images-from-your-local.adoc b/downstream/modules/hub/proc-pushing-container-images-from-your-local.adoc index 8cdc4ab79d..01e23b6407 100644 --- a/downstream/modules/hub/proc-pushing-container-images-from-your-local.adoc +++ b/downstream/modules/hub/proc-pushing-container-images-from-your-local.adoc @@ -3,54 +3,53 @@ = Pushing container images from your local environment -Use the following procedure to sign images on a local system and push those signed images to the {HubName} registry. +Use the following procedure to sign an {ExecEnvNameSing} on a local system and push the signed {ExecEnvShort} to the {HubName} registry. .Procedure -. From a terminal, log into podman, or any container client currently in use: +. From a terminal, log in to Podman, or any container client currently in use: + ---- > podman pull ---- + -. After the image is pulled, add tags (for example: latest, rc, beta, or version numbers, such as 1.0; 2.3, and so on): +. After the {ExecEnvShort} is pulled, add tags (for example: latest, rc, beta, or version numbers, such as 1.0; 2.3, and so on): + ---- > podman tag /: ---- + -. Sign the image after changes have been made, and push it back up to the {HubName} registry: +. Sign the {ExecEnvShort} after changes have been made, and push it back up to the {HubName} registry: + ---- > podman push /: --tls-verify=false --sign-by ---- + -If the image is not signed, it can only be pushed with any current signature embedded. Alternatively, you can use the following script to push the image without signing it: +If the {ExecEnvShort} is not signed, it can only be pushed with any current signature embedded. Alternatively, you can use the following script to push the {ExecEnvShort} without signing it: + ---- > podman push /: --tls-verify=false ---- + -. Once the image has been pushed, navigate to your {HubName}. - -. From the navigation panel, select {MenuACExecEnvironments}. +. Once the {ExecEnvShort} has been pushed, navigate to {MenuACExecEnvironments}. . To display the new {ExecEnvShort}, click the *Refresh* icon. -. Click the name of the image to view your pushed image. +. Click the name of the image to view your pushed image. .Troubleshooting -The details page in {HubName} indicates whether or not an image has been signed. If the details page indicates that an image is *Unsigned*, you can sign the image from {HubName} using the following steps: +The details page for each {ExecEnvShort} indicates whether it has been signed. If the details page indicates that an image is *Unsigned*, you can sign the {ExecEnvShort} from {HubName} using the following steps: -. Click the image name to navigate to the details page. +. Click the {ExecEnvShort} name to navigate to the details page. . Click the btn:[More Actions] icon *{MoreActionsIcon}*. Three options are available: +* *Sign {ExecEnvShort}* * *Use in Controller* -* *Delete* -* *Sign* +* *Delete {ExecEnvShort}* + -. Click *Sign* from the drop-down menu. +. Click *Sign {ExecEnvShort}* from the drop-down menu. -The signing service signs the image. -After the image is signed, the status changes to "signed". +The signing service signs the {ExecEnvShort}. 
+After the {ExecEnvShort} is signed, the status changes to "signed". diff --git a/downstream/modules/hub/proc-reject-collections.adoc b/downstream/modules/hub/proc-reject-collections.adoc index 5f46dfb1da..87206c611d 100644 --- a/downstream/modules/hub/proc-reject-collections.adoc +++ b/downstream/modules/hub/proc-reject-collections.adoc @@ -2,9 +2,9 @@ = Rejecting collections uploaded for review -You can reject collections uploaded to individual namespaces. All collections awaiting review are located under the *Approval* tab in the *Staging* repository. +You can reject collections uploaded to individual namespaces. All collections awaiting review are located in {MenuACAdminCollectionApproval}. -Collections requiring approval have the status *Needs review*. Click the *Version* to view the contents of the collection. +Collections requiring approval have the status *Needs review*. .Prerequisites @@ -13,7 +13,7 @@ Collections requiring approval have the status *Needs review*. Click the *Versio .Procedure . From the navigation panel, select {MenuACAdminCollectionApproval}. -. Locate the collection to review. -. Click btn:[Reject] to decline the collection. +. Find the collection you want to review in the list. You can also filter collections by Namespace, Repository, and Status using the search bar. +. Click the thumbs down icon to reject the collection. Confirm your choice in the modal that appears. Collections you decline for publication are moved to the *Rejected* repository. diff --git a/downstream/modules/hub/proc-revert-repository-version.adoc b/downstream/modules/hub/proc-revert-repository-version.adoc index 0c5ea68389..11a408f088 100644 --- a/downstream/modules/hub/proc-revert-repository-version.adoc +++ b/downstream/modules/hub/proc-revert-repository-version.adoc @@ -8,9 +8,9 @@ When automation content collections are added or removed from a repository, a new version is created. If a change to your repository causes a problem, you can revert to a previous version. Reverting is a safe operation and does not delete collections from the system, but rather, changes the content associated with the repository. The number of versions saved is defined in the *Retained number of versions* setting when a xref:proc-create-repository[repository is created]. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to {PrivateHubName}. + +. Log in to {PlatformNameShort}. . From the navigation panel, select {MenuACAdminRepositories}. -. Locate your repository in the list and click the btn:[More Actions] icon *{MoreActionsIcon}*, then select *Edit*. +. Click into your repository in the list and then select the *Versions* tab. . Locate the version you want to revert to and click the btn:[More Actions] icon *{MoreActionsIcon}*, and select *Revert to this version*. -. Click btn:[Revert]. +. Check the box confirming your selection, and then click btn:[Revert to repository version]. diff --git a/downstream/modules/hub/proc-review-collection-imports.adoc b/downstream/modules/hub/proc-review-collection-imports.adoc index 2911d6a6c6..615a13af69 100644 --- a/downstream/modules/hub/proc-review-collection-imports.adoc +++ b/downstream/modules/hub/proc-review-collection-imports.adoc @@ -14,11 +14,11 @@ Import log:: activities executed during the collection import * You have access to a namespace to which you can upload collections. 
.Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to your {PrivateHubName}. + +. Log in to your {PlatformNameShort}. . From the navigation panel, select {MenuACNamespaces}. . Select a namespace. -. Click the btn:[More Actions] icon *{MoreActionsIcon}* and select *My imports*. +. Click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Imports*. . Use the search field or locate an imported collection from the list. . Click the imported collection. . Review collection import details to determine the status of the collection in your namespace. diff --git a/downstream/modules/hub/proc-set-community-remote.adoc b/downstream/modules/hub/proc-set-community-remote.adoc index d2353ad6d8..5f519894a0 100644 --- a/downstream/modules/hub/proc-set-community-remote.adoc +++ b/downstream/modules/hub/proc-set-community-remote.adoc @@ -1,6 +1,6 @@ // Module included in the following assemblies: // obtaining-token/master.adoc -[id="proc-set-community-remote"] +[id="proc-set-community-remote_{context}"] = Configuring the community remote repository and syncing {Galaxy} collections You can edit the *community* remote repository to synchronize chosen collections from {Galaxy} to your {PrivateHubName}. @@ -9,7 +9,7 @@ By default, your {PrivateHubName} community repository directs to `galaxy.ansibl .Prerequisites * You have *Modify Ansible repo content* permissions. -For more information on permissions, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_hub/assembly-user-access[Configuring user access for your {PrivateHubName}]. +For more information on permissions, see link:{LinkCentralAuth}. * You have a `requirements.yml` file that identifies those collections to synchronize from {Galaxy} as in the following example: + .Requirements.yml example @@ -22,18 +22,24 @@ collections: ----- .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to {HubName}. + +. Log in to {PlatformNameShort}. . From the navigation panel, select {MenuACAdminRemotes}. -. In the *Community* remote, click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Edit*. -. In the *YAML requirements* field, click btn:[Browse] and locate the `requirements.yml` file on your local machine. -. Click btn:[Save]. +. In the *Details* tab in the *Community* remote, click btn:[Edit remote]. +. In the *YAML requirements* field, paste the contents of your `requirements.yml` file. +. Click btn:[Save remote]. + You can now synchronize collections identified in your `requirements.yml` file from {Galaxy} to your {PrivateHubName}. -. Click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Sync* to sync collections from {Galaxy} and {HubNameMain}. +. From the navigation panel, select {MenuACAdminRepositories}. Next to the *community* repository, click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Sync repository* to sync collections between {Galaxy} and {HubNameMain}. +. On the modal that appears, you can toggle the following options: +* *Mirror*: Select if you want your repository content to mirror the remote repository's content. +* *Optimize*: Select if you want to sync only when no changes are reported by the remote server. +. Click btn:[Sync] to complete the sync. 
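The `requirements.yml` sample in the community-remote module above is cut off by the hunk boundary. A minimal stand-in, with real {Galaxy} collection names used purely as examples:

[source,bash]
----
# Illustrative requirements file; swap in the collections your
# organization actually wants to sync from galaxy.ansible.com.
cat <<'EOF' > requirements.yml
collections:
  - name: community.general
  - name: community.crypto
    version: ">=2.0.0"
EOF
----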
.Verification -The *Sync status* notification updates to notify you of completion or failure of {Galaxy} collections synchronization to your {HubNameMain}. +The *Sync status* column updates to notify you whether the {Galaxy} collections synchronization to your {HubNameMain} is successful. + +* Navigate to {MenuACCollections} and select *Community* to confirm successful synchronization. + -* Select *Community* from the collections content drop-down list to confirm successful synchronization. diff --git a/downstream/modules/hub/proc-set-rhcertified-remote.adoc b/downstream/modules/hub/proc-set-rhcertified-remote.adoc index 7ddc8c7c99..a8ef4dc59a 100644 --- a/downstream/modules/hub/proc-set-rhcertified-remote.adoc +++ b/downstream/modules/hub/proc-set-rhcertified-remote.adoc @@ -1,6 +1,6 @@ // Module included in the following assemblies: // obtaining-token/master.adoc -[id="proc-set-rhcertified-remote"] +[id="proc-set-rhcertified-remote_{context}"] = Configuring the rh-certified remote repository and synchronizing {CertifiedColl} You can edit the *rh-certified* remote repository to synchronize collections from {HubName} hosted on {Console} to your {PrivateHubName}. @@ -8,32 +8,33 @@ By default, your {PrivateHubName} `rh-certified` repository includes the URL for To use only those collections specified by your organization, a {PrivateHubName} administrator can upload manually-created requirements files from the `rh-certified` remote. -For more information about using requirements files, see link:https://docs.ansible.com/ansible/latest/collections_guide/collections_installing.html#install-multiple-collections-with-a-requirements-file[Install multiple collections with a requirements file] in the _Using Ansible collections_ guide. - If you have collections `A`, `B`, and `C` in your requirements file, and a new collection `X` is added to {Console} that you want to use, you must add `X` to your requirements file for {PrivateHubName} to synchronize it. - .Prerequisites * You have valid *Modify Ansible repo content* permissions. -For more information on permissions, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_hub/assembly-user-access[Configuring user access for your {PrivateHubName}]. +For more information on permissions, see link:{LinkCentralAuth}. * You have retrieved the Sync URL and API Token from the {HubName} hosted service on {Console}. -* You have configured access to port 443. This is required for synchronizing certified collections. For more information, see the {HubName} table in the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/ref-network-ports-protocols_planning[Network ports and protocols] chapter of the {PlatformName} Planning Guide. +* You have configured access to port 443. This is required for synchronizing certified collections. For more information, see the {HubName} table in the link:{URLPlanningGuide}/ref-network-ports-protocols_planning[Network ports and protocols] chapter of {TitlePlanningGuide}. .Procedure -//[ddacosta] For 2.5 this will be Log in to Ansible Automation Platform and select Automation Content. Automation hub opens in a new tab. From the navigation ... -. Log in to your {PrivateHubName}. + +. Log in to your {PlatformNameShort}. . From the navigation panel, select {MenuACAdminRemotes}. -. In the *rh-certified* remote repository, click the btn:[More Actions] icon *{MoreActionsIcon}* and click btn:[Edit]. +. 
In the *rh-certified* remote repository, click btn:[Edit remote]. . In the *URL* field, paste the *Sync URL*. . In the *Token* field, paste the token you acquired from {Console}. -. Click btn:[Save]. +. Click btn:[Save remote]. + You can now synchronize collections between your organization synclist on {Console} and your {PrivateHubName}. + -. Click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Sync*. +. From the navigation panel, select {MenuACAdminRepositories}. Next to *rh-certified* click the btn:[More Actions] icon *{MoreActionsIcon}* and select *Sync repository*. +. On the modal that appears, you can toggle the following options: +* *Mirror*: Select if you want your repository content to mirror the remote repository's content. +* *Optimize*: Select if you want to sync only when no changes are reported by the remote server. +. Click btn:[Sync] to complete the sync. .Verification -The *Sync status* notification updates to notify you that the Red Hat Certified Content Collections synchronization is complete. +The *Sync status* column updates to notify you whether the Red Hat Certified Content Collections synchronization is successful. -* Select *Red Hat Certified* from the collections content drop-down list to confirm that your collections content has synchronized successfully. +* Navigate to {MenuACCollections} to confirm that your collections content has synchronized successfully. diff --git a/downstream/modules/hub/proc-sync-image.adoc b/downstream/modules/hub/proc-sync-image.adoc index 1cfc99be65..826e6ed38b 100644 --- a/downstream/modules/hub/proc-sync-image.adoc +++ b/downstream/modules/hub/proc-sync-image.adoc @@ -3,8 +3,8 @@ [id="proc-sync-image-adoc_{context}"] = Syncing images from a container repository -You can pull images from the {HubName} container registry to sync an image to your local machine. -To sync an image from a remote container registry, you must first configure a remote registry. +You can pull {ExecEnvName} from the {HubName} remote registry to sync an image to your local machine. +To sync an {ExecEnvNameSing} from a remote registry, you must first configure a remote registry. .Prerequisites @@ -20,16 +20,16 @@ You must have permission to view and pull from a private container repository. + [NOTE] ==== -Some container registries are aggressive with rate limiting. +Some remote registries are aggressive with rate limiting. Set a rate limit under *Advanced Options*. ==== + . From the navigation panel, select {MenuACExecEnvironments}. -. Click btn:[Add execution environment] in the page header. +. Click btn:[Create execution environment] in the page header. . Select the registry you want to pull from. -The *Name* field displays the name of the image displayed on your local registry. +The *Name* field displays the name of the {ExecEnvName} displayed on your local registry. + [NOTE] ==== @@ -38,7 +38,7 @@ For example, if the upstream name is set to "alpine" and the *Name* field is "lo ==== + . Set a list of tags to include or exclude. -Syncing images with a large number of tags is time consuming and uses a lot of disk space. +Syncing {ExecEnvName} with a large number of tags is time consuming and uses a lot of disk space. 
[role="_additional-resources"] .Additional resources diff --git a/downstream/modules/hub/proc-tag-image.adoc b/downstream/modules/hub/proc-tag-image.adoc index b71fa08364..61024ca7bb 100644 --- a/downstream/modules/hub/proc-tag-image.adoc +++ b/downstream/modules/hub/proc-tag-image.adoc @@ -5,21 +5,20 @@ = Tagging container images [role="_abstract"] -Tag images to add an additional name to images stored in your {HubName} container repository. If no tag is added to an image, {HubName} defaults to `latest` for the name. +Tag {ExecEnvName} to add an additional name to {ExecEnvName} stored in your {HubName} container repository. If no tag is added to an {ExecEnvNameSing}, {HubName} defaults to `latest` for the name. .Prerequisites -* You have *change image tags* permissions. +* You have *change {ExecEnvNameSing} tags* permissions. .Procedure . From the navigation panel, select {MenuACExecEnvironments}. -. Select your container repository. +. Select your {ExecEnvName}. . Click the *Images* tab. . Click the btn:[More Actions] icon *{MoreActionsIcon}*, and click btn:[Manage tags]. . Add a new tag in the text field and click btn:[Add]. . Optional: Remove *current tags* by clicking btn:[x] on any of the tags for that image. -. Click btn:[Save]. .Verification * Click the *Activity* tab and review the latest changes. diff --git a/downstream/modules/hub/proc-tag-pulled-image.adoc b/downstream/modules/hub/proc-tag-pulled-image.adoc index b7e5ae9176..a962cc1ba7 100644 --- a/downstream/modules/hub/proc-tag-pulled-image.adoc +++ b/downstream/modules/hub/proc-tag-pulled-image.adoc @@ -3,34 +3,33 @@ [id="tag-pulled-images"] -= Tagging images for use in {HubName} += Tagging {ExecEnvShort}s for use in {HubName} [role="_abstract"] -After you pull images from a registry, tag them for use in your {PrivateHubName} container registry. +After you pull {ExecEnvShort}s from a registry, tag them for use in your {PrivateHubName} remote registry. .Prerequisites -* You have pulled a container image from an external registry. +* You have pulled an {ExecEnvShort} from an external registry. * You have the FQDN or IP address of the {HubName} instance. .Procedure -* Tag a local image with the {HubName} container repository: +* Tag a local {ExecEnvShort} with the {HubName} container repository: + [subs="+quotes"] ----- -$ podman tag registry.redhat.io/____:____ ____/____ +$ podman tag registry.redhat.io/____:____ ____/____ ----- .Verification - . List the images in local storage: + ----- $ podman images ----- + -. Verify that the image you recently tagged with your {HubName} information is contained in the list. +. Verify that the {ExecEnvShort} you recently tagged with your {HubName} information is contained in the list. diff --git a/downstream/modules/hub/proc-uploading-collections.adoc b/downstream/modules/hub/proc-uploading-collections.adoc index 0c648447fc..72452b4adc 100644 --- a/downstream/modules/hub/proc-uploading-collections.adoc +++ b/downstream/modules/hub/proc-uploading-collections.adoc @@ -13,14 +13,18 @@ Format your collection file name as follows: `. {ControllerNameStart} user names do not include a prefix. + +*{ControllerNameStart} user accounts take precedence:* When an individual user had accounts on multiple services in 2.4, priority is given to their {ControllerName} account during migration, so those are not renamed. 
+ +*Component-level roles are retained until user migration is complete:* When users log in using an existing service account and do not perform the account linking process, only the roles for that specific service account are available. The migration process is completed once the user performs the account linking process. At that time, all roles for all services are migrated into the new {Gateway} user account. + +[role="_additional-resources"] + +== Additional resources + +* See link:{URLCentralAuth}/gw-managing-access#proc-controller-creating-a-user[Creating a user] for more information on user types. diff --git a/downstream/modules/platform/con-aap-migration-considerations.adoc b/downstream/modules/platform/con-aap-migration-considerations.adoc index 453523b5a0..30f5f11245 100644 --- a/downstream/modules/platform/con-aap-migration-considerations.adoc +++ b/downstream/modules/platform/con-aap-migration-considerations.adoc @@ -4,4 +4,6 @@ [role="_abstract"] -If you are upgrading from {PlatformNameShort} 1.2 on {OCPShort} 3 to {PlatformNameShort} 2.x on {OCPShort} 4, you must provision a fresh {OCPShort} version 4 cluster and then migrate the {PlatformNameShort} to the new cluster. +If you are upgrading from any version of {PlatformNameShort} older than 2.4, you must first upgrade to {PlatformNameShort} 2.4. +If you are on {OCPShort} 3 and you want to upgrade to {OCPShort} 4, you must provision a fresh {OCPShort} version 4 cluster and then migrate the {PlatformNameShort} to the new cluster. + diff --git a/downstream/modules/platform/con-aap-migration-prepare.adoc b/downstream/modules/platform/con-aap-migration-prepare.adoc index dba1b6eff6..8a35debe8b 100644 --- a/downstream/modules/platform/con-aap-migration-prepare.adoc +++ b/downstream/modules/platform/con-aap-migration-prepare.adoc @@ -4,7 +4,7 @@ [role="_abstract"] -Before migrating your current {PlatformNameShort} deployment to {OperatorPlatform}, you need to back up your existing data, create k8s secrets for your secret key and postgresql configuration. +Before migrating your current {PlatformNameShort} deployment to {OperatorPlatformNameShort}, you must back up your existing data and create Kubernetes secrets for your secret key and PostgreSQL configuration. [NOTE] ==== diff --git a/downstream/modules/platform/con-aap-upgrade-planning.adoc b/downstream/modules/platform/con-aap-upgrade-planning.adoc index 73b7ca8a25..d811ea21fb 100644 --- a/downstream/modules/platform/con-aap-upgrade-planning.adoc +++ b/downstream/modules/platform/con-aap-upgrade-planning.adoc @@ -3,34 +3,40 @@ [id="aap-upgrade-planning_{context}"] = {PlatformNameShort} upgrade planning - + [role="_abstract"] Before you begin the upgrade process, review the following considerations to plan and prepare your {PlatformNameShort} deployment: -[discrete] -== {ControllerNameStart} +* See link:{URLPlanningGuide}/platform-system-requirements[System requirements] in the {TitlePlanningGuide} guide to ensure you have the topologies that fit your use case. ++ +[NOTE] +==== +2.4 to 2.5 upgrades now include link:{URLPlanningGuide}/ref-aap-components#con-about-platform-gateway_planning[{GatewayStart}]. Ensure you review the 2.5 link:{URLPlanningGuide}/ref-network-ports-protocols_planning[Network ports and protocols] for architectural changes. +==== ++ +[IMPORTANT] +==== +When upgrading from {PlatformNameShort} 2.4 to 2.5, the API endpoints for the {ControllerName}, {HubName}, and {EDAcontroller} are all available for use.
These APIs are being deprecated and will be disabled in an upcoming release. This grace period is to allow for migration to the new APIs put in place with the {Gateway}. +==== ++ +* Verify that you have a valid subscription before upgrading from a previous version of {PlatformNameShort}. Existing subscriptions are carried over during the upgrade process. +* Ensure you have a backup of your {PlatformNameShort} 2.4 environment before upgrading in case any issues occur. See link:{URLControllerAdminGuide}/controller-backup-and-restore[Backup and restore] and link:{LinkOperatorBackup} for the specific topology of the environment. +* Ensure you capture your inventory or instance group details before upgrading. +* Upgrades of {EDAName} version 2.4 to 2.5 are not supported. Database migrations between {EDAName} 2.4 and {EDAName} 2.5 are not compatible. For more information, see xref:upgrade-controller-hub-eda-unified-ui_aap-upgrading-platform[{ControllerName} and {HubName} 2.4 and {EDAName} 2.5 with unified UI upgrades]. ++ +If you are currently running {EDAcontroller} 2.5, it is recommended that you disable all {EDAName} activations before upgrading to ensure that only new activations run after the upgrade process is complete. +* {ControllerNameStart} OAuth applications on the platform UI are not supported for 2.4 to 2.5 migration. See this link:https://access.redhat.com/solutions/7091987[Knowledgebase article] for more information. To learn how to recreate your OAuth applications, see link:{URLCentralAuth}/gw-token-based-authentication#assembly-controller-applications[Applications] in the {TitleCentralAuth} guide. +* During the upgrade process, user accounts from the individual services are migrated. If there are accounts from multiple services, they must be linked to access the unified platform. See xref:account-linking_aap-post-upgrade[Account linking] for details. +* {PlatformNameShort} 2.5 offers a centralized Redis instance in both link:{URLPlanningGuide}/ha-redis_planning#gw-single-node-redis_planning[standalone] and link:{URLPlanningGuide}/ha-redis_planning#gw-clustered-redis_planning[clustered] topologies. For information about how to configure Redis, see link:{URLInstallationGuide}/assembly-platform-install-scenario#redis-config-enterprise-topology_platform-install-scenario[Configuring Redis] in the {TitleInstallationGuide} guide. +* When upgrading from {PlatformNameShort} 2.4 to {PlatformVers}, connections to the {Gateway} URL might fail on the {Gateway} UI if you are using the {ControllerName} behind a load balancer. The following error message is displayed: `Error connecting to Controller API` ++ +To resolve this issue, add the {Gateway} URL as a trusted source in the `CSRF_TRUSTED_ORIGINS` setting in the *settings.py* file for each controller host. You must then restart each controller host so that the URL changes are implemented. For more information, see _Upgrading_ in link:{LinkTroubleshootingAAP}. -* Even if you have a valid license from a previous version, you must provide your credentials or a subscriptions manifest upon upgrading to the latest version of {ControllerName}. -* If you need to upgrade {RHEL} and {ControllerName}, you must first backup and restore your {ControllerName} data. -* Clustered upgrades require special attention to instance and instance groups before upgrading.
[role="_additional-resources"] .Additional resources -* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-managing-subscriptions#controller-importing-subscriptions[Importing a subscription] -* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-backup-and-restore[Backup and restore] -* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-clustering[Clustering] - -[discrete] -== {HubNameStart} - -* When upgrading to {PlatformNameShort} {PlatformVers}, you can either add an existing {HubName} API token or generate a new one and invalidate any existing tokens. - -[role="_additional-resources"] -.Additional resources -* <> - -[discrete] -== {EDAcontroller} -//ATTENTION: Remove this section for EDA 1.0.4; customers will no longer need to perform deactivation because services will be automatically restored after upgrade and migration. +* link:{URLCentralAuth}/assembly-gateway-licensing#proc-attaching-subscriptions[Attaching a subscription] +* xref:con-backup-aap_aap-upgrading-platform[Backup and restore] +* link:{URLControllerAdminGuide}/controller-clustering[Clustering] +* link:{LinkPlanningGuide} -* If you are currently running {EDAcontroller} and plan to deploy it when you upgrade to {PlatformNameShort} {PlatformVers}, it is recommended that you disable all {EDAName} activations before upgrading to ensure that only new activations run after the upgrade process has completed. This prevents possibilities of orphaned containers running activations from the previous version. \ No newline at end of file diff --git a/downstream/modules/platform/con-aap-upgrades.adoc b/downstream/modules/platform/con-aap-upgrades.adoc index d2be8be3bb..c2a3320e4d 100644 --- a/downstream/modules/platform/con-aap-upgrades.adoc +++ b/downstream/modules/platform/con-aap-upgrades.adoc @@ -4,11 +4,44 @@ = {PlatformNameShort} upgrades -Upgrading to version {PlatformVers} from {PlatformNameShort} 2.1 or later involves downloading the installation package and then performing the following steps: +Currently, it is possible to perform {PlatformNameShort} upgrades using one of the following supported upgrade paths. -* Set up your inventory to match your installation environment. -* Run the {PlatformVers} installation program over your current {PlatformNameShort} installation. +[IMPORTANT] +==== +Upgrading from {EDAName} 2.4 is not supported. If you’re using {EDAName} 2.4 in production, contact Red Hat before you upgrade. +==== -[role="_additional-resources"] -.Additional resources -* <> +Before beginning your upgrade be sure to review the prerequisites and upgrade planning sections of this guide. + +[cols="a,a"] +|=== +h|Supported upgrade path h| Steps to upgrade +|{PlatformNameShort} 2.4 to 2.5 | xref:proc-choosing-obtaining-installer_aap-upgrading-platform[Download the installation package]. + +xref:editing-inventory-file-for-updates_aap-upgrading-platform[Set up your inventory file] to match your installation environment. See link:{LinkTopologies} for a list of example inventory files. + +xref:con-backup-aap_aap-upgrading-platform[Back up your {PlatformNameShort} instance]. + +xref:proc-running-setup-script-for-updates[Run the 2.5 installation program] over your current {PlatformNameShort} instance. + +xref:account-linking_aap-post-upgrade[Link your existing service level accounts] to a single unified platform account. 
+ +|{PlatformNameShort} 2.5 to 2.5.x | xref:proc-choosing-obtaining-installer_aap-upgrading-platform[Download the installation package]. + +xref:editing-inventory-file-for-updates_aap-upgrading-platform[Set up your inventory file] to match your installation environment. See link:{LinkTopologies} for a list of example inventory files. + +xref:con-backup-aap_aap-upgrading-platform[Back up your {PlatformNameShort} instance]. + +xref:proc-running-setup-script-for-updates[Run the 2.5 installation program] over your current {PlatformNameShort} instance. + +|xref:upgrade-controller-hub-eda-unified-ui_aap-upgrading-platform[{ControllerNameStart} and {HubName} 2.4 and {EDAName} 2.5 with unified UI upgrades] | Upgrade the 2.4 services (using the inventory file to specify only {ControllerName} and {HubName} VMs) to get them to the initial version of {PlatformNameShort} 2.5. + +After all the services are at the same version, run a 2.5 upgrade on all the services. |=== + + +// [hherbly]: not sure we need the addt'l resources block? the xref goes to the next section of the document. +// [ddacosta]: agree, it's not needed. +//[role="_additional-resources"] +//.Additional resources +//* xref:aap-upgrading-platform[Upgrading to {PlatformName} {PlatformVers}] diff --git a/downstream/modules/platform/con-about-automation-mesh.adoc b/downstream/modules/platform/con-about-automation-mesh.adoc index d4987af011..33cbaa5f14 100644 --- a/downstream/modules/platform/con-about-automation-mesh.adoc +++ b/downstream/modules/platform/con-about-automation-mesh.adoc @@ -6,8 +6,8 @@ [role="_abstract"] {AutomationMeshStart} is an overlay network intended to ease the distribution of work across a large and dispersed collection of workers through nodes that establish peer-to-peer connections with each other using existing networks. -{PlatformName} 2 replaces Ansible Tower and isolated nodes with {ControllerName} and {HubName}. -{ControllerNameStart} provides the control plane for automation through its UI, RESTful API, RBAC, workflows and CI/CD integration, while {AutomationMesh} can be used for setting up, discovering, changing or modifying the nodes that form the control and execution layers. +{PlatformName} 2 replaces Ansible Tower and isolated nodes with {PlatformNameShort} and {HubName}. +{PlatformNameShort} provides the control plane for automation through its UI, RESTful API, RBAC, workflows and CI/CD integration, while {AutomationMesh} can be used for setting up, discovering, changing, or modifying the nodes that form the control and execution layers. ifdef::operator-mesh[] {AutomationMeshStart} is useful for: diff --git a/downstream/modules/platform/con-about-eda-controller.adoc b/downstream/modules/platform/con-about-eda-controller.adoc index 95bdffbaf5..3a539eff34 100644 --- a/downstream/modules/platform/con-about-eda-controller.adoc +++ b/downstream/modules/platform/con-about-eda-controller.adoc @@ -11,10 +11,5 @@ The {EDAcontroller} is the interface for event-driven automation and introduces [role="_additional-resources"] .Additional resources - -//// -The following link will not work until published. -//// - -* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_event-driven_ansible_guide/index[Getting Started with Event-Driven Ansible Guide].
+* link:{URLEDAUserGuide}[{TitleEDAUserGuide}] diff --git a/downstream/modules/platform/con-about-operator.adoc b/downstream/modules/platform/con-about-operator.adoc index d38d9620a2..4661de41e2 100644 --- a/downstream/modules/platform/con-about-operator.adoc +++ b/downstream/modules/platform/con-about-operator.adoc @@ -1,13 +1,16 @@ [id="con-about-operator_{context}"] -= About {OperatorPlatform} += About {OperatorPlatformNameShort} [role="_abstract"] -The {OperatorPlatform} provides cloud-native, push-button deployment of new {PlatformNameShort} instances in your OpenShift environment. -The {OperatorPlatform} includes resource types to deploy and manage instances of {ControllerNameStart} and {PrivateHubName}. +The {OperatorPlatformNameShort} provides cloud-native, push-button deployment of new {PlatformNameShort} instances in your OpenShift environment. +The {OperatorPlatformNameShort} includes resource types to deploy and manage instances of {ControllerName} and {PrivateHubName}. It also includes {ControllerName} job resources for defining and launching jobs inside your {ControllerName} deployments. Deploying {PlatformNameShort} instances with a Kubernetes native operator offers several advantages over launching instances from a playbook deployed on {OCP}, including upgrades and full lifecycle support for your {PlatformName} deployments. -You can install the {OperatorPlatform} from the Red Hat Operators catalog in OperatorHub. +You can install the {OperatorPlatformNameShort} from the Red Hat Operators catalog in OperatorHub. + +For information about the {OperatorPlatformNameShort} infrastructure topology, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/tested_deployment_models/index#container-topologies[Container topologies] in _{TitleTopologies}_. + diff --git a/downstream/modules/platform/con-about-platform-gateway.adoc b/downstream/modules/platform/con-about-platform-gateway.adoc new file mode 100644 index 0000000000..85791f6fea --- /dev/null +++ b/downstream/modules/platform/con-about-platform-gateway.adoc @@ -0,0 +1,15 @@ +[id="con-about-platform-gateway_{context}"] + += {GatewayStart} + +[role="_abstract"] +// content taken from snippets/snip-gateway-component-description.adoc and con-gw-activity-stream.adoc +{GatewayStart} is the service that handles authentication and authorization for the {PlatformNameShort}. It provides a single entry into the {PlatformNameShort} and serves the platform user interface so you can authenticate and access all of the {PlatformNameShort} services from a single location. For more information about the services available in the {PlatformNameShort}, refer to link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/getting_started_with_ansible_automation_platform/index#assembly-gs-key-functionality[Key functionality and concepts] in _{TitleGettingStarted}_. + +The {Gateway} includes an activity stream that captures changes to gateway resources, such as the creation or modification of organizations, users, and service clusters, among others. For each change, the activity stream collects information about the time of the change, the user that initiated the change, the action performed, and the actual changes made to the object, when possible. The information gathered varies depending on the type of change.
+ +You can access the details captured by the activity stream from the API: + +----- +/api/gateway/v1/activitystream/ +----- diff --git a/downstream/modules/platform/con-adding-subscription-manifest.adoc b/downstream/modules/platform/con-adding-subscription-manifest.adoc new file mode 100644 index 0000000000..35362a6069 --- /dev/null +++ b/downstream/modules/platform/con-adding-subscription-manifest.adoc @@ -0,0 +1,7 @@ +[id="con-adding-subscription-manifest"] + += Adding a subscription manifest to {PlatformNameShort} + +[role="_abstract"] + +Before you first log in, you must add your subscription information to the platform. To add a subscription to {PlatformNameShort}, see link:{URLCentralAuth}/assembly-gateway-licensing#assembly-aap-obtain-manifest-files[Obtaining a manifest file] in the link:{LinkCentralAuth} guide. diff --git a/downstream/modules/platform/con-automation-mesh-node-types.adoc b/downstream/modules/platform/con-automation-mesh-node-types.adoc index 4948f1ef18..1c1cd3543a 100644 --- a/downstream/modules/platform/con-automation-mesh-node-types.adoc +++ b/downstream/modules/platform/con-automation-mesh-node-types.adoc @@ -24,7 +24,7 @@ The *control plane* consists of hybrid and control nodes. Instances in the contr * *Control nodes* - control nodes run project and inventory updates and system jobs, but not regular jobs. Execution capabilities are disabled on these nodes. endif::mesh-VM[] ifdef::operator-mesh[] -Instances in the control plane run persistent {ControllerName} services such as the web server and task dispatcher, in addition to project updates, and management jobs. +Instances in the control plane run persistent {PlatformNameShort} services such as the web server and task dispatcher, in addition to project updates, and management jobs. However, in the operator-based model, there are no hybrid or control nodes. There are container groups, which make up containers running on the Kubernetes cluster. That comprises the control plane. diff --git a/downstream/modules/platform/con-backup-aap.adoc b/downstream/modules/platform/con-backup-aap.adoc index d0984d3350..5b67927d25 100644 --- a/downstream/modules/platform/con-backup-aap.adoc +++ b/downstream/modules/platform/con-backup-aap.adoc @@ -2,16 +2,17 @@ = Back up your {PlatformNameShort} instance -Back up an existing {PlatformNameShort} instance by running the `.setup.sh` script with the `backup_dir` flag, which saves the content and configuration of your current environment: +Back up an existing {PlatformNameShort} instance by running the `./setup.sh` script with the `backup_dir` flag, which saves the content and configuration of your current environment. -. Navigate to your `ansible-tower-setup-latest` directory. +.Procedure + +. Navigate to your {PlatformNameShort} installation directory. . Run the `./setup.sh` script following the example below: + ---- -$ ./setup.sh -e ‘backup_dir=/ansible/mybackup’ -e ‘use_compression=True’ @credentials.yml -b <1><2> +$ ./setup.sh -e 'backup_dir=/ansible/mybackup' -e 'use_compression=True' @credentials.yml -b <1> ---- <1> `backup_dir` specifies a directory to save your backup to. -<2> `@credentials.yml` passes the password variables and their values encrypted via `ansible-vault`. -With a successful backup, a backup file is created at `/ansible/mybackup/tower-backup-latest.tar.gz` . +With a successful backup, a backup file is created at `/ansible/mybackup/automation-platform-backup-.tar.gz`.
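The backup command above has a matching restore path. The following is a minimal sketch, assuming the installer's `-r` restore mode and the `restore_backup_file` extra variable; the archive name and paths are illustrative:

----
# Create a compressed backup of the current environment.
$ ./setup.sh -e 'backup_dir=/ansible/mybackup' -e 'use_compression=True' @credentials.yml -b

# Restore from that backup later (illustrative archive name).
$ ./setup.sh -e 'restore_backup_file=/ansible/mybackup/automation-platform-backup-<timestamp>.tar.gz' @credentials.yml -r
----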
diff --git a/downstream/modules/platform/con-building-an-execution-environment-in-a-disconnected-environment.adoc b/downstream/modules/platform/con-building-an-execution-environment-in-a-disconnected-environment.adoc index f702eb5c9b..e89869e5c2 100644 --- a/downstream/modules/platform/con-building-an-execution-environment-in-a-disconnected-environment.adoc +++ b/downstream/modules/platform/con-building-an-execution-environment-in-a-disconnected-environment.adoc @@ -6,21 +6,22 @@ = Building an {ExecEnvShort} in a disconnected environment -link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/creating_and_consuming_execution_environments/index[Creating execution environments] for {PlatformNameShort} is a common task which works differently in disconnected environments. When building a custom {ExecEnvShort}, the ansible-builder tool defaults to downloading content from the following locations on the internet: +Creating execution environments for {PlatformNameShort} is a common task that works differently in disconnected environments. When building a custom {ExecEnvShort}, the ansible-builder tool defaults to downloading content from the following locations on the internet: * Red Hat {HubNameStart} ({Console}) or {Galaxy} (galaxy.ansible.com) for any Ansible content collections added to the {ExecEnvShort} image. * PyPI (pypi.org) for any python packages required as collection dependencies. -* RPM repositories such as the RHEL or UBI repositories (cdn.redhat.com) for adding or updating RPMs to the execution environment image, if needed. +* RPM repositories such as the RHEL or UBI repositories (cdn.redhat.com) for adding or updating RPMs to the {ExecEnvShort} image, if needed. * registry.redhat.io for access to the base container images. -Building an {ExecEnvShort} image in a disconnected environment requires mirroring content from these locations. See link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/red_hat_ansible_automation_platform_installation_guide/index#importing-collections-into-private-automation-hub_disconnected-installation[Importing Collections into private automation hub] for information on importing collections from Ansible {Galaxy} or {HubName} into a {PrivateHubName}. +Building an {ExecEnvShort} image in a disconnected environment requires mirroring content from these locations. +For information about importing collections from {Galaxy} or {HubName} into a {PrivateHubName}, see link:{URLHubManagingContent}/managing-collections-hub#proc-import-collection[Importing an automation content collection in {HubName}]. -Mirrored PyPI content once transferred into the disconnected network can be made available using a web server or an artifact repository like Nexus. The RHEL and UBI repository content can be exported from an internet-facing Red Hat Satellite server, copied into the disconnected environment, then imported into a disconnected Satellite so it is available for building custom {ExecEnvShort}s. See link:{BaseURL}/red_hat_satellite/{SatelliteVers}/html-single/installing_satellite_server_in_a_disconnected_network_environment/index#iss_export_sync_in_an_air_gapped_scenario[ISS Export Sync in an Air-Gapped Scenario] for details. +Once transferred into the disconnected network, mirrored PyPI content can be made available by using a web server or an artifact repository such as Nexus.
The RHEL and UBI repository content can be exported from an internet-facing Red Hat Satellite Server, copied into the disconnected environment, then imported into a disconnected Satellite so it is available for building custom {ExecEnvShort}s. See link:{BaseURL}/red_hat_satellite/{SatelliteVers}/html-single/installing_satellite_server_in_a_disconnected_network_environment/index#iss_export_sync_in_an_air_gapped_scenario[ISS Export Sync in an Air-Gapped Scenario] for details. -The default base container image, ee-minimal-rhel8, is used to create custom {ExecEnvShort} images and is included with the bundled installer. This image is added to the {PrivateHubName} at install time. If a different base container image such as ee-minimal-rhel9 is required, it must be imported to the disconnected network and added to the {PrivateHubName} container registry. +The default base container image, ee-minimal-rhel8, is used to create custom {ExecEnvShort} images and is included with the bundled installer. This image is added to the {PrivateHubName} at install time. If a different base container image such as ee-minimal-rhel9 is required, it must be imported to the disconnected network and added to the {PrivateHubName} container registry. Once all of the prerequisites are available on the disconnected network, the ansible-builder command can be used to create custom {ExecEnvShort} images. diff --git a/downstream/modules/platform/con-controller-access-organizations.adoc b/downstream/modules/platform/con-controller-access-organizations.adoc index d916d64d7b..a89326e8f7 100644 --- a/downstream/modules/platform/con-controller-access-organizations.adoc +++ b/downstream/modules/platform/con-controller-access-organizations.adoc @@ -1,24 +1,8 @@ -[id="con-controller-access-organizations"] - -= Access to organizations - -* Select btn:[Access] when viewing your organization to display the users associated with this organization, and their -roles. +:_mod-docs-content-type: CONCEPT -image:organizations-show-users-permissions-organization.png[Organization access] - -Use this page to complete the following tasks: - -* Manage the user membership for this organization. -Click btn:[Users] on the navigation panel to manage user membership on a per-user basis from the *Users* page. -* Assign specific users certain levels of permissions within your organization. -* Enable them to act as an administrator for a particular resource. -For more information, see link:https://docs.ansible.com/automation-controller/latest/html/userguide/security.html#rbac-ug[Role-Based Access Controls]. +[id="con-controller-access-organizations"] -Click a user to display that user's details. -You can review, grant, edit, and remove associated permissions for that user. -For more information, see xref:assembly-controller-users[Users]. += Access to organizations +You can manage access to an organization by selecting an organization from the *Organizations* list view and selecting the associated tabs for providing access to xref:proc-controller-add-organization-user[Users], xref:proc-gw-add-admin-organization[Administrators] or xref:proc-gw-add-team-organization[Teams]. 
-include::proc-controller-add-organization-user.adoc[leveloffset=+1] -include::ref-controller-organization-notifications.adoc[leveloffset=+1] diff --git a/downstream/modules/platform/con-controller-administration.adoc b/downstream/modules/platform/con-controller-administration.adoc index 5af49ae1b7..5b0556c949 100644 --- a/downstream/modules/platform/con-controller-administration.adoc +++ b/downstream/modules/platform/con-controller-administration.adoc @@ -5,16 +5,11 @@ The *Administration* menu provides access to the administrative options of {ControllerName}. From here, you can create, view, and edit: -* xref:assembly-controller-custom-credentials[Credential types] -* xref:controller-notifications[Notifications] -* Management_jobs -* xref:controller-instance-groups[Instance groups] -* Instances -* xref:assembly-controller-applications[Applications] -* xref:assembly-controller-execution-environments[Execution environments] -//Topology View is in the Admin Guide -* Topology view -//Next version includes -//* Instance Groups -//* Instances -//* Execution Environments +//activity stream is an unconnected procedure. It needs a home. +* xref:assembly-controller-activity-stream[Activity Stream] +* xref:controller-approval-nodes[Workflow Approvals] +* xref:controller-notifications[Notifiers] +* link:{URLControllerAdminGuide}/assembly-controller-management-jobs[Management Jobs] + + + diff --git a/downstream/modules/platform/con-controller-api-basic-auth.adoc b/downstream/modules/platform/con-controller-api-basic-auth.adoc index fe9f6ad3a7..5fb948dea9 100644 --- a/downstream/modules/platform/con-controller-api-basic-auth.adoc +++ b/downstream/modules/platform/con-controller-api-basic-auth.adoc @@ -24,6 +24,6 @@ You can disable Basic authentication for security purposes. .Procedure -. From the navigation panel, select {MenuAEAdminSettings}. -. Select *Miscellaneous Authentication settings* from the list of *System* options. -. Disable the option to *Enable HTTP Basic Auth*. +. From the navigation panel, select {MenuSetGateway}. +. Click btn:[Edit {Gateway} settings]. +. Disable the option *Gateway basic auth enabled*. diff --git a/downstream/modules/platform/con-controller-api-oauth2-token.adoc b/downstream/modules/platform/con-controller-api-oauth2-token.adoc index 20cebfcb10..84dbe763e1 100644 --- a/downstream/modules/platform/con-controller-api-oauth2-token.adoc +++ b/downstream/modules/platform/con-controller-api-oauth2-token.adoc @@ -7,7 +7,7 @@ OAuth 2 authentication is commonly used when interacting with the {ControllerNam Similar to Basic authentication, you are given an OAuth 2 token with each API request through the Authorization header. Unlike Basic authentication, OAuth 2 tokens have a configurable timeout and are scopable. Tokens have a configurable expiration time and can be easily revoked for one user or for the entire {ControllerName} system by an administrator if needed. -You can do this with the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#ref-controller-revoke-oauth2-token[revoke_oauth2_tokens] management command, or by using the API as explained in link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#ref-controller-revoke-access-token[Revoke an access token]. 
+You can do this with the link:{URLCentralAuth}/gw-token-based-authentication#ref-controller-revoke-oauth2-token[revoke_oauth2_tokens] management command, or by using the API as explained in link:{URLCentralAuth}/gw-token-based-authentication#ref-controller-revoke-access-token[Revoke an access token]. The different methods for obtaining OAuth2 access tokens in {ControllerName} include the following: @@ -16,8 +16,8 @@ The different methods for obtaining OAuth2 access tokens in {ControllerName} inc * Application token: Implicit grant type * Application token: Authorization Code grant type -A user needs to create an OAuth 2 token in the API or in the *Users* > *Tokens* tab of the {ControllerName} UI. -For more information about creating tokens through the UI, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#proc-controller-user-tokens[Users - Tokens]. +A user needs to create an OAuth 2 token in the API or in the {MenuAMAdminOauthApps} tab of the {Gateway} UI. +For more information about creating tokens through the UI, see link:{URLCentralAuth}/gw-token-based-authentication#proc-controller-apps-create-tokens[Adding tokens]. For the purpose of this example, use the PAT method for creating a token in the API. After you create it, you can set the scope. @@ -25,7 +25,7 @@ After you create it, you can set the scope. [NOTE] ==== You can configure the expiration time of the token system-wide. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#ref-controller-use-oauth2-token-system[Using OAuth 2 Token System for Personal Access Tokens]. +For more information, see link:{URLCentralAuth}/gw-token-based-authentication[Configuring access to external applications with token-based authentication]. ==== Token authentication is best used for any programmatic use of the {ControllerName} API, such as Python scripts or tools such as curl. @@ -88,7 +88,7 @@ print(json.dumps(response.json(), indent=4, sort_keys=True)) .Additional resources -For more information about obtaining OAuth2 access tokens and how to use OAuth 2 in the context of external applications, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#assembly-controller-token-based-authentication[Token-Based Authentication] in the _{ControllerAG}_. +For more information about obtaining OAuth2 access tokens and how to use OAuth 2 in the context of external applications, see link:{URLCentralAuth}/gw-token-based-authentication[Configuring access to external applications with token-based authentication]. [discrete] == Enabling external users to create OAuth 2 tokens @@ -97,6 +97,6 @@ By default, external users such as those created by single sign-on are not able .Procedure -. From the navigation panel, select {MenuAEAdminSettings}. -. Select *Miscellaneous Authentication settings* from the list of *System* options. -. Enable the option to *Allow External Users to Create OAuth2 Tokens*. +. From the navigation panel, select {MenuSetGateway}. +. Select btn:[Edit {Gateway} settings]. +. Enable the option to *Allow external users to create OAuth2 tokens*. 
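To make the PAT method described in this module concrete, the following is a hedged sketch of creating a token over the API with Basic authentication and then using it as a Bearer token; the host, credentials, and token value are placeholders:

----
# Create a personal access token (PAT) with write scope.
$ curl -u <user>:<password> -X POST \
    -H "Content-Type: application/json" \
    -d '{"description": "example PAT", "scope": "write"}' \
    https://<controller-host>/api/v2/tokens/

# Use the token value from the response for subsequent requests.
$ curl -H "Authorization: Bearer <token-value>" \
    https://<controller-host>/api/v2/me/
----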
diff --git a/downstream/modules/platform/con-controller-api-sso-auth.adoc b/downstream/modules/platform/con-controller-api-sso-auth.adoc index f12e5ec2e4..d22c228f05 100644 --- a/downstream/modules/platform/con-controller-api-sso-auth.adoc +++ b/downstream/modules/platform/con-controller-api-sso-auth.adoc @@ -11,5 +11,4 @@ If you click that option, it redirects you to the Identity Provider, in this cas .Additional resources -For the various types of supported SSO authentication methods, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#assembly-controller-set-up-social-authentication[Setting up social authentication] and link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#controller-set-up-enterprise-authentication[Setting up enterprise authentication] in the _{ControllerAG}_. - +For the various types of supported SSO authentication methods, see link:{URLCentralAuth}/gw-configure-authentication#gw-config-authentication-type[Configuring an authentication type]. diff --git a/downstream/modules/platform/con-controller-api-tools.adoc b/downstream/modules/platform/con-controller-api-tools.adoc index c0d0adca67..6c3723590a 100644 --- a/downstream/modules/platform/con-controller-api-tools.adoc +++ b/downstream/modules/platform/con-controller-api-tools.adoc @@ -13,6 +13,7 @@ Further options include the following: * link:http://www.telerik.com/fiddler[Fiddler] * link:https://mitmproxy.org/[mitmproxy] -* link:https://addons.mozilla.org/en-US/firefox/addon/live-http-headers/[Live HTTP headers FireFox extension] -* link:http://sourceforge.net/projects/paros/[Paros] +// * [emcwhinn] Link deprecated +// link:https://addons.mozilla.org/en-US/firefox/addon/live-http-headers/[Live HTTP headers FireFox extension] +* link:https://sourceforge.net/projects/paros/[Paros] diff --git a/downstream/modules/platform/con-controller-benefits-fact-caching.adoc b/downstream/modules/platform/con-controller-benefits-fact-caching.adoc index 903a5f158f..d4154755ff 100644 --- a/downstream/modules/platform/con-controller-benefits-fact-caching.adoc +++ b/downstream/modules/platform/con-controller-benefits-fact-caching.adoc @@ -14,9 +14,9 @@ Custom fact caching could conflict with the controller's fact caching feature. You must use the fact caching module that includes {ControllerName}. ==== -You can select to use cached facts in your job by checking the *Enable Fact Storage* option when you create or edit a job template. +You can select to use cached facts in your job by checking the *Enable fact storage* option when you create or edit a job template. -image::ug-job-templates-options-use-factcache.png[Cached facts] +//image::ug-job-templates-options-use-factcache.png[Cached facts] To clear facts, run the Ansible `clear_facts` link:https://docs.ansible.com/ansible/latest/collections/ansible/builtin/meta_module.html#examples[meta task]. The following is an example playbook that uses the Ansible `clear_facts` meta task. 
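The fact-caching module above refers to an example playbook that falls outside this hunk. A minimal sketch of a one-task playbook that runs the `clear_facts` meta task, written inline for illustration (the inventory path is a placeholder):

----
# Write and run a minimal playbook that clears cached facts
# for the targeted hosts.
$ cat > clear_facts.yml <<'EOF'
- hosts: all
  gather_facts: false
  tasks:
    - name: Clear cached facts for the targeted hosts
      ansible.builtin.meta: clear_facts
EOF
$ ansible-playbook -i <inventory> clear_facts.yml
----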
diff --git a/downstream/modules/platform/con-controller-capacity-determination.adoc b/downstream/modules/platform/con-controller-capacity-determination.adoc index ba9ad91962..95e29fb134 100644 --- a/downstream/modules/platform/con-controller-capacity-determination.adoc +++ b/downstream/modules/platform/con-controller-capacity-determination.adoc @@ -2,7 +2,7 @@ = {ControllerNameStart} capacity determination and job impact -The {ControllerNameStart} capacity system determines how many jobs can run on an instance given the amount of resources available to the instance and the size of the jobs that are running (referred to as Impact). +The {ControllerName} capacity system determines how many jobs can run on an instance given the amount of resources available to the instance and the size of the jobs that are running (referred to as Impact). The algorithm used to determine this is based on the following two things: * How much memory is available to the system (`mem_capacity`) @@ -13,7 +13,7 @@ Since groups are made up of instances, instances can also be assigned to multipl This means that impact to one instance can affect the overall capacity of other groups. Instance groups, not instances themselves, can be assigned to be used by jobs at various levels. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-clustering[Clustering] in the _{ControllerAG}_. +For more information, see link:{URLControllerAdminGuide}/controller-clustering[Clustering] in _{ControllerAG}_. When the Task Manager prepares its graph to determine which group a job runs on, it commits the capacity of an instance group to a job that is not ready to start yet. @@ -22,5 +22,5 @@ This guarantees that jobs do not get stuck as a result of an under-provisioned s .Additional resources -* For information on container groups, see link:{BaseURL}red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-instance-and-container-groups#controller-container-capacity[Container capacity limits] in the _{ControllerAG}_. -* For information on sliced jobs and their impact to capacity, see xref:controller-job-slice-execution-behavior[Job slice execution behavior]. +* For information about container groups, see link:{URLControllerAdminGuide}/assembly-controller-improving-performance#ref-controller-settings-control-execution-nodes[Capacity settings for instance group and container group] in _{ControllerAG}_. +* For information about sliced jobs and their impact to capacity, see xref:controller-job-slice-execution-behavior[Job slice execution behavior]. diff --git a/downstream/modules/platform/con-controller-cleanup-expired-sessions.adoc b/downstream/modules/platform/con-controller-cleanup-expired-sessions.adoc index 379a0d363a..d2756c851b 100644 --- a/downstream/modules/platform/con-controller-cleanup-expired-sessions.adoc +++ b/downstream/modules/platform/con-controller-cleanup-expired-sessions.adoc @@ -9,4 +9,4 @@ For more information, see xref:proc-controller-scheduling-deletion[Scheduling de You can also set or review notifications associated with this management job the same way as described in xref:proc-controller-management-notifications[Notifications] for activity stream management jobs. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-notifications[Notifications] in the _{ControllerUG}_. 
+For more information, see link:{URLControllerUserGuide}/controller-notifications[Notifiers] in _{ControllerUG}_. diff --git a/downstream/modules/platform/con-controller-configure-hostname-notifications.adoc b/downstream/modules/platform/con-controller-configure-hostname-notifications.adoc index 02a8fed400..f6e8cc053c 100644 --- a/downstream/modules/platform/con-controller-configure-hostname-notifications.adoc +++ b/downstream/modules/platform/con-controller-configure-hostname-notifications.adoc @@ -2,7 +2,7 @@ = Configure the host hostname for notifications -In link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-config#controller-configure-system[System settings], you can replace the default value in the *Base URL of the service* field with your preferred hostname to change the notification hostname. +In link:{URLControllerAdminGuide}/controller-config#controller-configure-system[System settings], you can replace the default value in the *Base URL of the service* field with your preferred hostname to change the notification hostname. //image::ug-system-misc-baseurl.png[System Base URL] diff --git a/downstream/modules/platform/con-controller-container-groups.adoc b/downstream/modules/platform/con-controller-container-groups.adoc index 33729a878d..1a6f966313 100644 --- a/downstream/modules/platform/con-controller-container-groups.adoc +++ b/downstream/modules/platform/con-controller-container-groups.adoc @@ -16,4 +16,4 @@ Container groups upgraded from versions before {ControllerName} 4.0 revert back ==== Container groups are different from {ExecEnvShort}s in that {ExecEnvShort}s are container images and do not use a virtual environment. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#assembly-controller-execution-environments[Execution environments] in the _{ControllerUG}_. +For more information, see xref:assembly-controller-execution-environments[Execution environments]. diff --git a/downstream/modules/platform/con-controller-create-users.adoc b/downstream/modules/platform/con-controller-create-users.adoc index 695031caa6..ab57210c2d 100644 --- a/downstream/modules/platform/con-controller-create-users.adoc +++ b/downstream/modules/platform/con-controller-create-users.adoc @@ -1,3 +1,5 @@ +:_mod-docs-content-type: CONCEPT + [id="controller-create-users"] Users associated with an organization are shown in the *Access* tab of the organization. diff --git a/downstream/modules/platform/con-controller-custom-dynamic-inv-scripts.adoc b/downstream/modules/platform/con-controller-custom-dynamic-inv-scripts.adoc index 14db49d847..8d9a2d09d4 100644 --- a/downstream/modules/platform/con-controller-custom-dynamic-inv-scripts.adoc +++ b/downstream/modules/platform/con-controller-custom-dynamic-inv-scripts.adoc @@ -19,4 +19,5 @@ The credential type must specify all the necessary types of inputs. Then, when you create a credential of this type, the secrets are stored in an encrypted form. If you apply that credential to the inventory source, the script has access to those inputs. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#assembly-controller-custom-credentials[Custom Credential Types] in the {ControllerUG}. 
+TBD +//For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#assembly-controller-custom-credentials[Custom Credential Types] in _{ControllerUG}_. diff --git a/downstream/modules/platform/con-controller-custom-logos.adoc b/downstream/modules/platform/con-controller-custom-logos.adoc index 095dfbe813..45ca86300f 100644 --- a/downstream/modules/platform/con-controller-custom-logos.adoc +++ b/downstream/modules/platform/con-controller-custom-logos.adoc @@ -3,7 +3,7 @@ = Custom logos and images {ControllerNameStart} supports the use of a custom logo. -You can add a custom logo by uploading an image and supplying a custom login message from the *Platform gateway settings* page. From the navigation panel, select {MenuSetGateway}. +You can add a custom logo by uploading an image and supplying a custom login message from the *{GatewayStart} settings* page. From the navigation panel, select {MenuSetGateway}. //image::ag-configure-aap-ui.png[Custom logo] For the best results, use a `.png` file with a transparent background. diff --git a/downstream/modules/platform/con-controller-deprovision-instance-groups.adoc b/downstream/modules/platform/con-controller-deprovision-instance-groups.adoc index 3346b21928..828603c518 100644 --- a/downstream/modules/platform/con-controller-deprovision-instance-groups.adoc +++ b/downstream/modules/platform/con-controller-deprovision-instance-groups.adoc @@ -39,7 +39,7 @@ awx-manage unregister_queue --queuename= Removing an instance's membership from an instance group in the inventory file and re-running the setup playbook does not ensure that the instance is not added back to a group. To be sure that an instance is not added back to a group, remove it through the API and also remove it in your inventory file. You can also stop defining instance groups in the inventory file. You can manage instance group topology through the {ControllerName} UI. -For more information about managing instance groups in the UI, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-instance-groups[Managing Instance Groups] in the _{ControllerUG}_. +For more information about managing instance groups in the UI, see xref:controller-instance-groups[Managing Instance Groups]. [NOTE] ==== diff --git a/downstream/modules/platform/con-controller-host-metric-utilities.adoc b/downstream/modules/platform/con-controller-host-metric-utilities.adoc index 8581def9e4..12e50b5a62 100644 --- a/downstream/modules/platform/con-controller-host-metric-utilities.adoc +++ b/downstream/modules/platform/con-controller-host-metric-utilities.adoc @@ -6,5 +6,5 @@ You can also soft delete hosts in bulk through the API. ifdef::controller-GS,controller-AG[] -For more information, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-host-metric-utilities[Host metrics utilities] section of the _{ControllerUG}_. +For more information, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-host-metric-utilities[Host metrics utilities] section of _{ControllerUG}_. 
endif::controller-GS,controller-AG[] diff --git a/downstream/modules/platform/con-controller-infrastructure.adoc b/downstream/modules/platform/con-controller-infrastructure.adoc new file mode 100644 index 0000000000..837dbe18d7 --- /dev/null +++ b/downstream/modules/platform/con-controller-infrastructure.adoc @@ -0,0 +1,14 @@ +[id="con-controller-infrastructure"] + += Infrastructure menu + +The *Infrastructure* menu provides quick access to the following {ControllerName} resources: + +* xref:assembly-controller-topology-viewer[Topology View] +* xref:controller-inventories[Inventories] +* xref:assembly-controller-hosts[Hosts] +* xref:controller-instance-groups[Instance Groups] +* xref:assembly-controller-instances[Instances] +* xref:assembly-controller-execution-environments[Execution Environments] +* xref:controller-credentials[Credentials] +* xref:ref-controller-credential-types[Credential Types] \ No newline at end of file diff --git a/downstream/modules/platform/con-controller-instance-groups.adoc b/downstream/modules/platform/con-controller-instance-groups.adoc index 0215512211..4b9b8f05fb 100644 --- a/downstream/modules/platform/con-controller-instance-groups.adoc +++ b/downstream/modules/platform/con-controller-instance-groups.adoc @@ -1,4 +1,4 @@ -[id="controller-instance-groups"] +[id="con-controller-instance-groups"] = Instance groups diff --git a/downstream/modules/platform/con-controller-inventory-sync-jobs.adoc b/downstream/modules/platform/con-controller-inventory-sync-jobs.adoc index 78b0fcd149..1dab2a3797 100644 --- a/downstream/modules/platform/con-controller-inventory-sync-jobs.adoc +++ b/downstream/modules/platform/con-controller-inventory-sync-jobs.adoc @@ -4,19 +4,19 @@ When an inventory synchronization is executed, the results display in the *Output* tab. -For more information on inventory synchronization, see xref:ref-controller-constructed-inventories[Constructed inventories]. +For more information about inventory synchronization, see xref:ref-controller-constructed-inventories[Constructed inventories]. If used, the Ansible CLI displays the same information. This can be useful for debugging. The `ANSIBLE_DISPLAY_ARGS_TO_STDOUT` parameter is set to `False` for all playbook runs. This parameter matches Ansible's default behavior and does not display task arguments in task headers in the *Job Detail* interface to avoid leaking certain sensitive module parameters to `stdout`. -To restore the previous behavior, set `ANSIBLE_DISPLAY_ARGS_TO_STDOUT` to `True` through the `AWX_TASK_ENV` configuration setting. +To restore the earlier behavior, set `ANSIBLE_DISPLAY_ARGS_TO_STDOUT` to `True` through the `AWX_TASK_ENV` configuration setting. For more information, see link:http://docs.ansible.com/ansible/latest/reference_appendices/config.html#envvar-ANSIBLE_DISPLAY_ARGS_TO_STDOUT[ANSIBLE_DISPLAY_ARGS_TO_STDOUT] in the ansible documentation. -Use the icons to relaunch image:rightrocket.png[Launch,15,15], download image:download.png[Download,15,15] the job output, or delete image:delete-button.png[Delete,15,15] the job. +You can btn:[Relaunch job], btn:[Cancel job], download image:download.png[Download,15,15] the job output, or delete image:delete-button.png[Delete,15,15] the job.
-image::ug-show-job-results-for-inv-sync.png[Job results inventory sync] +//image::ug-show-job-results-for-inv-sync.png[Job results inventory sync] [NOTE] ==== diff --git a/downstream/modules/platform/con-controller-job-branch-overriding.adoc b/downstream/modules/platform/con-controller-job-branch-overriding.adoc index 5a542ea4bc..4541e5a82a 100644 --- a/downstream/modules/platform/con-controller-job-branch-overriding.adoc +++ b/downstream/modules/platform/con-controller-job-branch-overriding.adoc @@ -7,5 +7,5 @@ These are represented by the values specified in the *Type Details* fields: image::ug-scm-project-branching-emphasized.png[Project branching emphasized] -When creating or editing a job you have the option to *Allow Branch Override*. +When creating or editing a job you have the option to *Allow branch override*. When this option is checked, project administrators can delegate branch selection to the job templates that use that project, requiring only project `use_role`. diff --git a/downstream/modules/platform/con-controller-keep-subscription-in-compliance.adoc b/downstream/modules/platform/con-controller-keep-subscription-in-compliance.adoc index 2e53bb851c..52190b0976 100644 --- a/downstream/modules/platform/con-controller-keep-subscription-in-compliance.adoc +++ b/downstream/modules/platform/con-controller-keep-subscription-in-compliance.adoc @@ -57,5 +57,5 @@ These are soft-deleted, meaning their records are not removed, but are not being endif::controller-UG,controller-AG[] ifdef::controller-GS,controller-AG[] -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-keep-subscription-in-compliance[Troubleshooting: Keeping your subscription in compliance] in the _{ControllerUG}_. +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-keep-subscription-in-compliance[Troubleshooting: Keeping your subscription in compliance] in _{ControllerUG}_. endif::controller-GS,controller-AG[] diff --git a/downstream/modules/platform/con-controller-notification-workflow.adoc b/downstream/modules/platform/con-controller-notification-workflow.adoc index 8bd7d221a4..c0840eecf3 100644 --- a/downstream/modules/platform/con-controller-notification-workflow.adoc +++ b/downstream/modules/platform/con-controller-notification-workflow.adoc @@ -2,7 +2,7 @@ = Notification workflow -When a job succeeds or fails, the error or success handler pulls a list of relevant notification templates using the procedure defined in the xref:controller-notifications[Notifications] section. +When a job succeeds or fails, the error or success handler pulls a list of relevant notification templates using the procedure defined in the xref:controller-notifications[Notifiers] section. It then creates a notification object for each one, containing relevant details about the job and sends it to the destination. These include email addresses, slack channels, and SMS numbers. 
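The notification objects created by this workflow are visible in the controller API. A hedged sketch of listing the notifications recorded for a job, with a placeholder job ID, host, and token:

----
# List the notifications that were generated for job 42.
$ curl -H "Authorization: Bearer <token-value>" \
    https://<controller-host>/api/v2/jobs/42/notifications/
----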
diff --git a/downstream/modules/platform/con-controller-overview-details.adoc b/downstream/modules/platform/con-controller-overview-details.adoc index 7d165bbfbe..037e137531 100644 --- a/downstream/modules/platform/con-controller-overview-details.adoc +++ b/downstream/modules/platform/con-controller-overview-details.adoc @@ -24,7 +24,7 @@ If you want to give any user or team permissions to use a job template, you can An auditor is useful for a service account that scrapes automation information from the REST API. .Additional resources -* For more information about user roles, see xref:con-controller-rbac[Role-Based Access Controls]. +* For more information about user roles, see link:{URLCentralAuth}/gw-managing-access[Managing access with role based access control]. = Cloud and autoscaling flexibility {ControllerNameStart} includes a powerful optional provisioning callback feature that enables nodes to request configuration on-demand. @@ -48,16 +48,16 @@ For more information, see xref:ref-projects-galaxy-support[Ansible Galaxy Suppor = Inventory support for OpenStack Dynamic inventory support is available for OpenStack. With this you can target any of the virtual machines or images running in your OpenStack cloud. -For more information, see the xref:ref-controller-credential-openstack[OpenStack credential type] section. +For more information, see xref:ref-controller-credential-openstack[OpenStack credential type]. = Remote command execution -Use remote command execution to perform a simple tasks, such as adding a single user, updating a single security vulnerability, or restarting a failing service. +Use remote command execution to perform a simple task, such as adding a single user, updating a single security vulnerability, or restarting a failing service. Any task that you can describe as a single Ansible play can be run on a host or group of hosts in your inventory. You can manage your systems quickly and easily. Because of an RBAC engine and detailed audit logging, you know which user has completed a specific task. = System tracking -You can collect facts using the fact caching feature. For more information, see xref:controller-fact-caching[Fact Caching]. +You can collect facts by using the fact caching feature. For more information, see xref:controller-fact-caching[Fact Caching]. = Integrated notifications Keep track of the status of your automation. @@ -91,7 +91,7 @@ For more information, see xref:proc-controller-inv-source-satellite[Red Hat Sate * Red Hat Insights integration, enabling Insights playbooks to be used as an {PlatformNameShort} project. -For more information, see xref:controller-setting-up-insights[Setting up Insights Remediations]. +For more information, see xref:controller-setting-up-insights[Setting up Red Hat Insights for {PlatformName} Remediations]. * {HubNameStart} acts as a content provider for {ControllerName}, requiring both an {ControllerName} deployment and an {HubName} deployment running alongside each other. @@ -134,7 +134,7 @@ This increases reliability, offers faster job completion, and improved cluster u For example, you can change a parameter across 15,000 switches at scale, or gather information across your multi-thousand-node RHEL estate. -For more information, see xref:controller-job-slicing[Job Slicing]. +For more information, see xref:controller-job-slicing[Job slicing]. = Support for deployment in a FIPS-enabled environment {ControllerNameStart} deploys and runs in restricted modes such as FIPS. 
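The remote command execution feature described in the overview maps to a single Ansible module run. A minimal sketch of the "add a single user" case, with a placeholder host pattern and inventory:

----
# Run one module against a group of hosts, the equivalent of a
# single-task play (values are illustrative).
$ ansible webservers -i <inventory> \
    -m ansible.builtin.user -a "name=jdoe state=present" --become
----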
diff --git a/downstream/modules/platform/con-controller-playbook-run-jobs.adoc b/downstream/modules/platform/con-controller-playbook-run-jobs.adoc index 08fda30a12..8c7810abb0 100644 --- a/downstream/modules/platform/con-controller-playbook-run-jobs.adoc +++ b/downstream/modules/platform/con-controller-playbook-run-jobs.adoc @@ -5,7 +5,7 @@ When a playbook is executed, the results display in the *Output* tab. If used, the Ansible CLI displays the same information. This can be useful for debugging. -image::ug-results-for-example-job.png[Results for example job] +//image::ug-results-for-example-job.png[Results for example job] The events summary displays the following events that are run as part of this playbook: @@ -16,9 +16,9 @@ The events summary displays the following events that are run as part of this pl image::ug-jobs-events-summary.png[Job events summary] -Use the icons next to the events to relaunch (image:rightrocket.png[Rightrocket,15,15]), download (image:download.png[Download,15,15]) the job output, or delete (image:delete-button.png[Delete,15,15]) the job. +You can btn:[Relaunch job], btn:[Cancel job], download image:download.png[Download,15,15] the job output, or delete image:delete-button.png[Delete,15,15] the job. Hover over a section of the host status bar in the *Output* view and the number of hosts associated with that status displays. The output for a playbook job is also available after launching a job from the *Jobs* tab of its *Jobs Templates* page. -View its host details by clicking on the line item tasks in the output. +View its host details by clicking the line item tasks in the output. diff --git a/downstream/modules/platform/con-controller-project-revision-behavior.adoc b/downstream/modules/platform/con-controller-project-revision-behavior.adoc index 334f8adc30..db4553db5b 100644 --- a/downstream/modules/platform/con-controller-project-revision-behavior.adoc +++ b/downstream/modules/platform/con-controller-project-revision-behavior.adoc @@ -2,15 +2,15 @@ = Project revision behavior -During a project update, the revision of the default branch (specified in the *SCM Branch* field of the project) is stored when updated. -If providing a non-default *SCM Branch* (not a commit hash or tag) in a job, the newest revision is pulled from the source control remote immediately before the job starts. -This revision is shown in the *Source Control Revision* field of the job and its project update. +During a project update, the revision of the default branch (specified in the *Source control branch* field of the project) is stored when updated. +If providing a non-default *Source control branch* (not a commit hash or tag) in a job, the newest revision is pulled from the source control remote immediately before the job starts. +This revision is shown in the *Source control revision* field of the job and its project update. -image::ug-output-branch-override.png[Jobs output override example] +//image::ug-output-branch-override.png[Jobs output override example] As a result, offline job runs are impossible for non-default branches. To ensure that a job is running a static version from source control, use tags or commit hashes. Project updates do not save all branches, only the project default branch. -The *SCM Branch* field is not validated, so the project must update to assure it is valid. -If this field is provided or prompted for, the *Playbook* field of job templates is not validated, and you have to launch the job template in order to verify presence of the expected playbook. 
+The *Source control branch* field is not validated, so the project must update to ensure that it is valid.
+If this field is provided or prompted for, the *Playbook* field of job templates is not validated, and you have to launch the job template to verify the presence of the expected playbook.
diff --git a/downstream/modules/platform/con-controller-resources.adoc b/downstream/modules/platform/con-controller-resources.adoc
index 9c99e79d93..dfdcd76284 100644
--- a/downstream/modules/platform/con-controller-resources.adoc
+++ b/downstream/modules/platform/con-controller-resources.adoc
@@ -5,7 +5,7 @@
 The *Resources* menu provides access to the following components of {ControllerName}:
 
 * Templates
-* xref:controller-credentials[Credentials]
+* TBD[Credentials]
 * xref:controller-projects[Projects]
 * xref:controller-inventories[Inventories]
 * Hosts
\ No newline at end of file
diff --git a/downstream/modules/platform/con-controller-role-based-access-controls.adoc b/downstream/modules/platform/con-controller-role-based-access-controls.adoc
index c4d713e8f8..f15cfae2f0 100644
--- a/downstream/modules/platform/con-controller-role-based-access-controls.adoc
+++ b/downstream/modules/platform/con-controller-role-based-access-controls.adoc
@@ -2,6 +2,7 @@
 
 = Role-based access controls
 
+//Not sure whether this is still true.
 To edit and delete a workflow job template, you must have the administrator role.
 To create a workflow job template, you must be an organization administrator or a system administrator.
 However, you can run a workflow job template that contains job templates that you do not have permissions for.
@@ -11,6 +12,6 @@ You must have `execute` access to a job template to add it to a workflow job tem
 You can also perform other tasks, such as making a duplicate copy or re-launching a workflow, depending on which permissions are granted to a user.
 You must have permissions to all the resources used in a workflow, such as job templates, before relaunching or making a copy.
 
-For more information, see xref:con-controller-rbac[Role-based access controls].
+For more information, see link:{URLCentralAuth}/gw-managing-access[Managing access with role based access control].
 
-For more information on performing the tasks described in this section, see the link:http://docs.ansible.com/automation-controller/4.4/html/administration/index.html#ag-start[Administration Guide].
+For more information about performing the tasks described, see xref:controller-workflow-job-templates[Workflow job templates].
diff --git a/downstream/modules/platform/con-controller-scm-inventory-jobs.adoc b/downstream/modules/platform/con-controller-scm-inventory-jobs.adoc
index 3e14b09a01..7c8c5a8964 100644
--- a/downstream/modules/platform/con-controller-scm-inventory-jobs.adoc
+++ b/downstream/modules/platform/con-controller-scm-inventory-jobs.adoc
@@ -4,6 +4,6 @@
 When an inventory sourced from an SCM, for example git, is executed, the results are displayed in the *Output* tab.
 If used, the Ansible CLI displays the same information.
 This can be useful for debugging.
 
-Use the icons in the navigation menu to relaunch (image:rightrocket.png[Rightrocket,15,15]), download (image:download.png[Download,15,15]) the job output, or delete (image:delete-button.png[Delete,15,15]) the job.
+Use the navigation menu to relaunch the job with btn:[Relaunch job], cancel it with btn:[Cancel job], download the job output (image:download.png[Download,15,15]), or delete the job (image:delete-button.png[Delete,15,15]).
-image::ug-results-for-scm-job.png[Results for SCM job] +//image::ug-results-for-scm-job.png[Results for SCM job] diff --git a/downstream/modules/platform/con-controller-settings.adoc b/downstream/modules/platform/con-controller-settings.adoc index abe9eaa377..9aedf99fa0 100644 --- a/downstream/modules/platform/con-controller-settings.adoc +++ b/downstream/modules/platform/con-controller-settings.adoc @@ -2,15 +2,16 @@ = The Settings menu -Configure global and system-level settings using the *Settings* menu. -The *Settings* menu provides access to {ControllerName} configuration settings. +You can configure some {ControllerName} options by using the *Settings* menu of the User Interface. -The *Settings* page enables administrators to configure the following: +The *Settings* page enables an administrator to configure the following: -* Authentication -* Jobs -* System-level attributes -* Customize the UI -* Product license information - -//include::settings-menu.adoc[] \ No newline at end of file +* link:{URLCentralAuth}/assembly-gw-settings#proc-controller-configure-subscriptions[Configuring subscriptions] +* link:{URLCentralAuth}/assembly-gw-settings#proc-settings-platform-gateway[{GatewayStart}] +* link:{URLCentralAuth}/assembly-gw-settings#proc-settings-user-preferences[User preferences] +//* link:{BaseURL}/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/index#proc-controller-configure-subscriptions[System] +* link:{URLControllerAdminGuide}/controller-config#controller-configure-jobs[Configuring jobs] +* link:{URLControllerAdminGuide}/assembly-controller-logging-aggregation#proc-controller-set-up-logging[Setting up logging] +* link:{URLCentralAuth}/assembly-gw-settings#proc-settings-troubleshooting[Troubleshooting options] +// [emcwhinn] Analytics has its own section in 2.5 UI +//* link:{BaseURL}/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/assembly-ag-controller-config#proc-controller-configure-analytics[{Analytics}] diff --git a/downstream/modules/platform/con-controller-understand-architecture.adoc b/downstream/modules/platform/con-controller-understand-architecture.adoc index 7319c94cc7..a7dab89548 100644 --- a/downstream/modules/platform/con-controller-understand-architecture.adoc +++ b/downstream/modules/platform/con-controller-understand-architecture.adoc @@ -3,7 +3,7 @@ = Understand the architecture of {PlatformNameShort} and {ControllerName} {PlatformNameShort} and {ControllerName} comprise a general-purpose, declarative automation platform. -That means that once an Ansible playbook is launched (by {ControllerName}, or directly on the command line), the playbook, inventory, and credentials provided to Ansible are considered to be the source of truth. +That means that when an Ansible Playbook is launched (by {ControllerName}, or directly on the command line), the playbook, inventory, and credentials provided to Ansible are considered to be the source of truth. If you want policies around external verification of specific playbook content, job definition, or inventory contents, you must complete these processes before the automation is launched, either by the {ControllerName} web UI, or the {ControllerName} API. The use of source control, branching, and mandatory code review is best practice for Ansible automation. 
diff --git a/downstream/modules/platform/con-controller-views.adoc b/downstream/modules/platform/con-controller-views.adoc index c13cdb4fc7..8c605b7abd 100644 --- a/downstream/modules/platform/con-controller-views.adoc +++ b/downstream/modules/platform/con-controller-views.adoc @@ -1,16 +1,15 @@ [id="con-controller-views"] = Views +//No longer required in 2.5 version The {ControllerName} UI provides several options for viewing information. * xref:proc-controller-viewing-dashboard[Dashboard view] -* xref:proc-controller-jobs-view[Jobs view] -//The following aren't included in the Views menu for the tech preview. +* xref:proc-controller-jobs-view[Jobs view] * xref:proc-controller-schedules-view[Schedules view] * xref:proc-controller-activity-stream[Activity Stream] * xref:proc-controller-workflow-approvals[Workflow Approvals] -//Host Metrics is included in the Analytics menu * xref:proc-controller-host-metrics[Host Metrics] include::proc-controller-viewing-dashboard.adoc[leveloffset=+1] diff --git a/downstream/modules/platform/con-controller-work-with-notifications.adoc b/downstream/modules/platform/con-controller-work-with-notifications.adoc index 88f92c846d..5c0e9d5bab 100644 --- a/downstream/modules/platform/con-controller-work-with-notifications.adoc +++ b/downstream/modules/platform/con-controller-work-with-notifications.adoc @@ -3,12 +3,12 @@ = Work with notifications From the navigation panel, select {MenuAEAdminJobNotifications}. -This enables you to review any notification integrations you have set up and their statuses, if they have run. +You can review any notification integrations you have set up and their statuses, if they have run. -image::ug-job-template-completed-notifications-view.png[Job template completed notifications] +//image::ug-job-template-completed-notifications-view.png[Job template completed notifications] Use the toggles to enable or disable the notifications to use with your particular template. -For more information, see xref:controller-enable-disable-notifications[Enable and Disable Notifications]. +For more information, see xref:controller-enable-disable-notifications[Enable and disable notifications]. If no notifications have been set up, click btn:[Add notifier] to create a new notification. -For more information about configuring various notification types and extended messaging, see xref:controller-notification-types[Notification Types]. +For more information about configuring various notification types and extended messaging, see xref:controller-notification-types[Notification types]. diff --git a/downstream/modules/platform/con-controller-workflow-visualizer.adoc b/downstream/modules/platform/con-controller-workflow-visualizer.adoc index 941faea527..7976041bad 100644 --- a/downstream/modules/platform/con-controller-workflow-visualizer.adoc +++ b/downstream/modules/platform/con-controller-workflow-visualizer.adoc @@ -3,4 +3,4 @@ = Workflow visualizer The Workflow Visualizer provides a graphical way of linking together job templates, workflow templates, project syncs, and inventory syncs to build a workflow template. -Before you build a workflow template, see the xref:controller-workflows[Workflows] section for considerations associated with various scenarios on parent, child, and sibling nodes. +Before you build a workflow template, see the xref:controller-workflows[Workflows in {ControllerName}] section for considerations associated with various scenarios on parent, child, and sibling nodes. 
diff --git a/downstream/modules/platform/con-declaring-variables.adoc b/downstream/modules/platform/con-declaring-variables.adoc
index f2ce09e0f0..b97c8fd0fc 100644
--- a/downstream/modules/platform/con-declaring-variables.adoc
+++ b/downstream/modules/platform/con-declaring-variables.adoc
@@ -31,4 +31,6 @@ The YAML inventory plugin processes variable values consistently and correctly.
 
 If a parameter value in the Ansible inventory file contains special characters, such as #, { or }, you must double-escape the value (that is, enclose the value in both single and double quotation marks).
 
-For example, to use `mypasswordwith#hashsigns` as a value for the variable `pg_password`, declare it as `pg_password='"mypasswordwith#hashsigns"'` in the Ansible host inventory file.
\ No newline at end of file
+For example, to use `mypasswordwith#hashsigns` as a value for the variable `pg_password`, declare it as `pg_password='"mypasswordwith#hashsigns"'` in the Ansible host inventory file.
+
+include::../aap-common/external-site-disclaimer.adoc[]
\ No newline at end of file
diff --git a/downstream/modules/platform/con-eda-2-5-with-controller-2-4.adoc b/downstream/modules/platform/con-eda-2-5-with-controller-2-4.adoc
new file mode 100644
index 0000000000..f70df85dcb
--- /dev/null
+++ b/downstream/modules/platform/con-eda-2-5-with-controller-2-4.adoc
@@ -0,0 +1,61 @@
+:_newdoc-version: 2.18.3
+:_template-generated: 2024-07-05
+
+:_mod-docs-content-type: CONCEPT
+
+[id="eda-2-5-with-controller-2-4_{context}"]
+= {EDAName} 2.5 with controller 2.4
+
+Use the following example to populate the inventory file to deploy a new single instance of {EDAName} 2.5 with controller 2.4. For {EDAName}, the requirement for a connection to controller is that `automation_controller_main_url` points to the 2.4 controller URL.
+
+
+----
+[automationedacontroller]
+eda.example.org
+
+[automationgateway]
+eda.example.org
+
+[database]
+data.example.com
+
+[all:vars]
+
+automationedacontroller_admin_password=''
+
+automationedacontroller_pg_host='data.example.com'
+automationedacontroller_pg_port=5432
+
+automationedacontroller_pg_database='automationedacontroller'
+automationedacontroller_pg_username='automationedacontroller'
+automationedacontroller_pg_password=''
+automationedacontroller_pg_sslmode='prefer'
+
+
+automation_controller_main_url='automationcontroller.example.org'
+#automationedacontroller_controller_verify_ssl=true <1>
+
+registry_url='registry.redhat.io'
+registry_username=''
+registry_password=''
+
+automationgateway_admin_password=''
+
+automationgateway_pg_host='data.example.com'
+automationgateway_pg_port=5432
+
+automationgateway_pg_database='automationgateway'
+automationgateway_pg_username='automationgateway'
+automationgateway_pg_password=''
+automationgateway_pg_sslmode='prefer'
+----
+
+<1> This variable sets whether TLS verification is performed. It is set to `true` by default, but you can set it to `false` if verification is not needed.
+
+[NOTE]
+====
+* Keep `controller` out of the inventory file. Ensure that `[automationcontroller]` is an empty group.
+* Only add an {EDAName} 2.5 server. Do not add an {EDAName} 2.4 server, because there is no upgrade option available.
+==== + + diff --git a/downstream/modules/platform/con-edge-manager-access-devices.adoc b/downstream/modules/platform/con-edge-manager-access-devices.adoc new file mode 100644 index 0000000000..89e02c144b --- /dev/null +++ b/downstream/modules/platform/con-edge-manager-access-devices.adoc @@ -0,0 +1,7 @@ +[id="edge-manager-access-devices"] + += Accessing devices remotely + +For troubleshooting an edge device, a user can be authorized to remotely connect to that device's console through the agent. +This does not require an SSH connection and works even if that device is on a private network (behind a NAT), has a dynamic IP address, or has its SSH service disabled. + diff --git a/downstream/modules/platform/con-editing-inventory-files.adoc b/downstream/modules/platform/con-editing-inventory-files.adoc index da7ca97038..9738910f21 100644 --- a/downstream/modules/platform/con-editing-inventory-files.adoc +++ b/downstream/modules/platform/con-editing-inventory-files.adoc @@ -6,4 +6,4 @@ You can further configure your {PlatformName} installation by including addition These configurations add optional features for managing your {PlatformName}. Add these variables by editing the inventory file using a text editor. -A table of predefined values for inventory file variables can be found in link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/appendix-inventory-files-vars[Inventory file variables] in the _{PlatformName} Installation Guide_. \ No newline at end of file +A table of predefined values for inventory file variables can be found in link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/rpm_installation/appendix-inventory-files-vars[Inventory file variables] in the _{PlatformName} Installation Guide_. \ No newline at end of file diff --git a/downstream/modules/platform/con-gs-ansible-content.adoc b/downstream/modules/platform/con-gs-ansible-content.adoc new file mode 100644 index 0000000000..5eb96caa8d --- /dev/null +++ b/downstream/modules/platform/con-gs-ansible-content.adoc @@ -0,0 +1,6 @@ +[id="con-gs-ansible-content_{context}"] + += About automation content + +Use the following Ansible concepts to create successful Ansible Playbooks and {ExecEnvName} before beginning your Ansible development project. + diff --git a/downstream/modules/platform/con-gs-ansible-lightspeed.adoc b/downstream/modules/platform/con-gs-ansible-lightspeed.adoc new file mode 100644 index 0000000000..8913dc0693 --- /dev/null +++ b/downstream/modules/platform/con-gs-ansible-lightspeed.adoc @@ -0,0 +1,8 @@ +[id="con-gs-ansible-lightspeed"] + += {LightspeedShortName} + +{LightspeedFullName} is a generative AI service designed by and for Ansible platform engineers and developers. +It accepts natural-language prompts entered by a user and then interacts with IBM watsonx foundation models to produce code recommendations built on Ansible best practices. + +{LightspeedFullName} helps automation teams learn, create, and maintain {PlatformName} content more efficiently. 
diff --git a/downstream/modules/platform/con-gs-ansible-roles.adoc b/downstream/modules/platform/con-gs-ansible-roles.adoc new file mode 100644 index 0000000000..14e9409ac9 --- /dev/null +++ b/downstream/modules/platform/con-gs-ansible-roles.adoc @@ -0,0 +1,9 @@ +[id="con-gs-ansible-roles_{context}"] + += Bundle content with Ansible roles + +A role is like a customized piece of automation content that bundles together relevant bits from playbooks to fit your system's specific needs. Roles are self-contained and portable, and can include groupings of tasks, variables, configuration templates, handlers, and other supporting files to orchestrate complicated automation flows. + +Instead of creating huge playbooks with hundreds of tasks, you can use roles to break the tasks apart into smaller, more discrete units of work. + +To learn more about roles, see link:https://www.redhat.com/en/topics/automation/what-is-an-ansible-role[What is an Ansible Role-and how is it used?]. diff --git a/downstream/modules/platform/con-gs-auto-dev-about-inv.adoc b/downstream/modules/platform/con-gs-auto-dev-about-inv.adoc new file mode 100644 index 0000000000..8fbb194670 --- /dev/null +++ b/downstream/modules/platform/con-gs-auto-dev-about-inv.adoc @@ -0,0 +1,39 @@ +[id="con-gs-auto-dev-about-inv"] + += About inventories + +An inventory is a file listing the collection of hosts managed by {PlatformNameShort}. +Organizations are assigned to inventories, while permissions to launch playbooks against inventories are controlled at the user or team level. + +== Browsing and creating inventories + +You can find inventories in the UI by navigating to {MenuInfrastructureInventories}. The Inventories window displays a list of the inventories that are currently available. You can sort the inventory list by name and search by inventory type, organization, description, inventory creators or modifiers, or additional criteria. +Use the following procedure to create a new inventory. + +.Procedure + +. From the navigation panel, select {MenuInfrastructureInventories}. The *Inventories* view displays a list of the inventories currently available. +. Click btn:[Create inventory], and from the list menu select the type of inventory you want to create. +. Enter the appropriate details into the following fields: +* *Name*: Enter a name for the inventory. +* Optional: *Description*: Enter a description. +* *Organization*: Choose among the available organizations. +* Only applicable to Smart Inventories: *Smart Host Filter*: Filters are similar to tags in that tags are used to filter certain hosts that contain those names. Therefore, to populate this field, specify a tag that contains the hosts you want, not the hosts themselves. Filters are case-sensitive. For more information, see link:{URLControllerUserGuide}/controller-inventories#ref-controller-smart-host-filter[Smart host filters] in the {TitleControllerUserGuide} guide. +* *Instance groups*: Select the instance group or groups for this inventory to run on. If the list is extensive, use the search to narrow the options. You can select multiple instance groups and sort them in the order that you want them run. +* Optional: *Labels*: Add labels that describe this inventory, so they can be used to group and filter inventories and jobs. +* Only applicable to constructed inventories: *Input inventories*: Specify the source inventories to include in this constructed inventory. Empty groups from input inventories are copied into the constructed inventory. 
+* Optional and only applicable to constructed inventories: *Cache timeout (seconds)*: Set the length of time that you want the cache plugin data to be stored before it times out.
+* Only applicable to constructed inventories: *Verbosity*: Control the level of output that Ansible produces as the playbook executes for the inventory sources associated with constructed inventories. Select the verbosity from Normal to various Verbose or Debug settings. This only appears in the "details" report view.
+** Verbose logging includes the output of all commands.
+** Debug logging is exceedingly verbose and includes information on SSH operations that can be useful in certain support instances. Most users do not need to see debug mode output.
+* Only applicable to constructed inventories: *Limit*: Restricts the number of returned hosts for the inventory source associated with the constructed inventory. You can paste a group name into the limit field to only include hosts in that group. For more information, see the *Source variables* setting.
+* Only applicable to standard inventories: *Options*: Check the box next to *Prevent instance group fallback* to enable only the instance groups listed in the *Instance groups* field to execute the job. If unchecked, all available instances in the execution pool will be used based on the hierarchy described in link:{URLControllerAdminGuide}/controller-clustering#controller-cluster-job-runs[Control where a job runs] in the {TitleControllerAdminGuide} guide. Click the tooltip for more information.
++
+NOTE: Set the `prevent_instance_group_fallback` option for smart inventories through the API.
++
+* *Variables* (*Source variables* for constructed inventories):
+** *Variables*: Variable definitions and values to apply to all hosts in this inventory. Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two.
+** *Source variables* for constructed inventories are used to configure the constructed inventory plugin. Source variables create groups under the `groups` data key. The variable accepts Jinja2 template syntax, renders it for every host, makes a `true` or `false` evaluation, and includes the host in the group (from the key of the entry) if the result is `true`.
+. Click btn:[Create inventory].
+
+After creating the new inventory, you can proceed with configuring permissions, groups, hosts, sources, and viewing completed jobs, if applicable to the type of inventory.
\ No newline at end of file
diff --git a/downstream/modules/platform/con-gs-auto-dev-job-templates.adoc b/downstream/modules/platform/con-gs-auto-dev-job-templates.adoc
new file mode 100644
index 0000000000..bcb03ea859
--- /dev/null
+++ b/downstream/modules/platform/con-gs-auto-dev-job-templates.adoc
@@ -0,0 +1,7 @@
+[id="con-gs-auto-dev-job-templates"]
+
+= Work with job templates
+
+A job template is a definition and set of parameters for running an Ansible job.
+
+A job template combines an Ansible playbook from a project and the settings required to launch it, including information about the target host against which the playbook will run, authentication information to access the host, and any other relevant variables. Job templates are useful for running the same job many times. Job templates also encourage the reuse of Ansible playbook content and collaboration between teams. For more information, see link:{URLControllerUserGuide}/controller-job-templates[Job templates] in the {TitleControllerUserGuide} guide.
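The inventories and job templates described in the preceding modules operate on a simple underlying data structure: a list of hosts and groups. As a minimal sketch (every host name, group name, and variable here is an illustrative placeholder, not a value required by the platform), an inventory expressed in YAML format might look like this:

[source,yaml]
----
# Illustrative YAML inventory: two groups of hosts with one group variable.
all:
  children:
    webservers:
      hosts:
        web1.example.com:
        web2.example.com:
      vars:
        http_port: 8080   # applied to every host in the webservers group
    databases:
      hosts:
        db1.example.com:
----

A job template whose playbook targets `hosts: webservers` would then run only against the two web server hosts in this inventory.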
diff --git a/downstream/modules/platform/con-gs-auto-op-about-inv.adoc b/downstream/modules/platform/con-gs-auto-op-about-inv.adoc new file mode 100644 index 0000000000..bb6f0d4eb0 --- /dev/null +++ b/downstream/modules/platform/con-gs-auto-op-about-inv.adoc @@ -0,0 +1,9 @@ +[id="con-gs-auto-op-about-inv"] + += About inventories + +An inventory is a file listing the collection of hosts managed by {PlatformNameShort}. +Organizations are assigned to inventories, while permissions to launch playbooks against inventories are controlled at the user or team level. + +Platform administrators and automation developers have the permissions to create inventories. +As an automation operator you can view inventories and their details. diff --git a/downstream/modules/platform/con-gs-auto-op-execute-inv.adoc b/downstream/modules/platform/con-gs-auto-op-execute-inv.adoc new file mode 100644 index 0000000000..6bbefd2eda --- /dev/null +++ b/downstream/modules/platform/con-gs-auto-op-execute-inv.adoc @@ -0,0 +1,19 @@ +[id="con-gs-auto-op-execute-inv"] + += Executing an inventory + +.Procedure + +. From the navigation panel, select {MenuInfrastructureInventories}. +The *Inventories* window displays a list of inventories that are currently available, along with the following information: +* *Name*: The inventory name. +* *Status*: The statuses are: +** *Success*: The inventory sync completed successfully. +** *Disabled*: No inventory source added to the inventory. +** *Error*: The inventory source completed with error. +* *Type*: Identifies whether the inventory is a standard inventory, a smart inventory, or a constructed inventory. +* *Organization*: The organization to which the inventory belongs. +. Select an inventory name to display the *Details* page for the inventory, including the inventory's groups and hosts. + +For more information about inventories, see the link:{URLControllerUserGuide}/controller-inventories[Inventories] section of the {TitleControllerUserGuide} guide. + diff --git a/downstream/modules/platform/con-gs-auto-op-job-templates.adoc b/downstream/modules/platform/con-gs-auto-op-job-templates.adoc new file mode 100644 index 0000000000..cdb47c1f29 --- /dev/null +++ b/downstream/modules/platform/con-gs-auto-op-job-templates.adoc @@ -0,0 +1,9 @@ +[id="con-gs-auto-op-job-templates"] + += Work with job templates + +A job template is a definition and set of parameters for running an Ansible job. + +A job template combines an Ansible Playbook from a project with the settings required to launch the job. Job templates are useful for running the same job many times. Job templates also encourage the reuse of Ansible Playbook content and collaboration between teams. For more information, see link:{URLControllerUserGuide}/controller-job-templates[Job Templates] in the {TitleControllerUserGuide} guide. + +Platform administrators and automation developers have the permissions to create job templates. As an automation operator you can launch job templates and view their details. diff --git a/downstream/modules/platform/con-gs-automation-content.adoc b/downstream/modules/platform/con-gs-automation-content.adoc new file mode 100644 index 0000000000..f7b42d2de8 --- /dev/null +++ b/downstream/modules/platform/con-gs-automation-content.adoc @@ -0,0 +1,57 @@ +[id="con-gs-automation-content"] + += Automation content + +{HubNameStart} is the central location for your {PlatformNameShort} content. 
+In {HubName} you can also find content collections that you can download and integrate into your automation environment. You can also create and upload your own content to distribute to your users. + +An Ansible Content Collection is a ready-to-use toolkit for automation and can include multiple types of content, including roles, modules, playbooks, and plugins all in one place. + +You can access {HubName} in one of two ways: + +* On the Red Hat-hosted link:https://console.redhat.com/[Hybrid Cloud Console], where you can find Red Hat validated or certified content that you can sync to your platform environment. +* On a self-hosted, on-premise {PrivateHubName}, where you can curate content for your automation users and manage access to collections and {ExecEnvShort}s. + +Depending on the way you access {HubName}, you may have access to different types of content collections. + +There are two types of Red Hat Ansible content: + +* {CertifiedName}, which Red Hat builds, supports, and maintains. +Certified collections are included in your subscription to {PlatformName} and can be found in {HubName}. +* {Valid} collections, which are customizable and therefore do not have a support guarantee, but have been tested in the {PlatformNameShort} environment. + +For more information about Ansible content, see xref:con-gs-create-automation-content[Create automation content] in xref:assembly-gs-auto-dev[Getting started as an automation developer]. + +== Ansible roles + +Ansible roles allow you to create reusable automation content that helps teams to work more efficiently and avoid duplicating efforts. +With roles, you can group together a broader range of existing automation content, like playbooks, configuration files, templates, tasks, and handlers to create customized automation content that can be reused and shared with others. + +You can also make roles configurable by exposing variables that users can set when calling the role, allowing them to configure their system according to their organization's requirements. + +Roles are generally included in Ansible content collections. + +.Additional resources + +For more information, see xref:con-gs-ansible-roles_assembly-gs-auto-dev[Bundle content with Ansible roles]. + +== Ansible playbooks + +Playbooks are YAML files that contain specific sets of human-readable instructions, or “plays,” that you send to run on a single target or groups of targets. +Ansible playbooks are repeatable and reusable configuration management tools designed to deploy complex applications. + +You can use playbooks to manage configurations of and deployments to remote machines to sequence multitiered rollouts involving rolling updates. Use playbooks to delegate actions to other hosts, interacting with monitoring servers and load balancers along the way. + +Once written, you can use and re-use playbooks for automation across your enterprise. +For example, if you need to run a task more than once, write a playbook and put it under source control. +Then, you can use the playbook to push out new configuration or confirm the configuration of remote systems. + +Ansible playbooks can declare configurations, orchestrate steps of any manually ordered process on many machines in a defined order, or start tasks synchronously or asynchronously. + +You can also use {LightspeedShortName}, Ansible's generative AI service, to create and develop playbooks to fit your needs. 
See the link:https://docs.redhat.com/en/documentation/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant/2.x_latest/html/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant_user_guide/index[Ansible Lightspeed documentation] for more information.
+
+.Additional resources
+
+* link:{LinkPlaybooksGettingStarted}
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant/2.x_latest/html/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant_user_guide/index[{LightspeedFullName} user guide]
+
diff --git a/downstream/modules/platform/con-gs-automation-decisions.adoc b/downstream/modules/platform/con-gs-automation-decisions.adoc
new file mode 100644
index 0000000000..808c20e0da
--- /dev/null
+++ b/downstream/modules/platform/con-gs-automation-decisions.adoc
@@ -0,0 +1,11 @@
+[id="con-gs-automation-decisions"]
+
+= Automation decisions
+
+{PlatformName} includes {EDAName}, an automation engine that listens to your system's event stream and reacts to events that you have specified with targeted automation tasks.
+In this way, {EDAName} manages routine automation tasks and responses, freeing you up to work on more complex tasks.
+
+Managed through {EDAcontroller}, Ansible rulebooks are the framework for automation decisions. Ansible rulebooks are collections of rulesets, which in turn consist of one or more sources, rules, and conditions. Rulebooks tell the system what events to flag and how to respond to them. From the Automation Decisions section of the platform user interface, you can use rulebooks to connect and listen to event sources, and define actions that are triggered in response to certain events.
+
+.Additional resources
+For more information about rulebooks, events, and sources, see xref:con-gs-define-events-rulebooks[Rulebook actions].
diff --git a/downstream/modules/platform/con-gs-automation-execution-jobs.adoc b/downstream/modules/platform/con-gs-automation-execution-jobs.adoc
new file mode 100644
index 0000000000..8dd456c371
--- /dev/null
+++ b/downstream/modules/platform/con-gs-automation-execution-jobs.adoc
@@ -0,0 +1,5 @@
+[id="con-gs-automation-execution-jobs"]
+
+= Automation execution jobs
+
+A job is an instance of {PlatformNameShort} launching an Ansible Playbook against an inventory of hosts.
diff --git a/downstream/modules/platform/con-gs-automation-execution.adoc b/downstream/modules/platform/con-gs-automation-execution.adoc
new file mode 100644
index 0000000000..0ab777c9af
--- /dev/null
+++ b/downstream/modules/platform/con-gs-automation-execution.adoc
@@ -0,0 +1,14 @@
+[id="con-gs-automation-execution"]
+
+= Automation execution
+
+The centerpiece of {PlatformNameShort} is its automation execution command and control center, where you can deploy, define, operate, scale, and delegate automation across your enterprise.
+With this functionality, you can perform a variety of tasks from a single location, such as running playbooks from a simple, straightforward web UI, monitoring dashboard activity, and using centralized logging to manage and track job execution.
+
+In the automation execution environment, you can use {ControllerName} tasks to build job templates, which standardize how automation is deployed, initiated, and delegated, making it more reusable and consistent.
+
+== Inventories
+
+An inventory is a single file, usually in INI or YAML format, containing a list of hosts and groups that can be acted upon using Ansible commands and playbooks.
+You can use an inventory file to specify your installation scenario and describe host deployments to Ansible.
+You can also use an inventory file to organize managed nodes in centralized files that provide Ansible with system information and network locations.
diff --git a/downstream/modules/platform/con-gs-automation-mesh.adoc b/downstream/modules/platform/con-gs-automation-mesh.adoc
new file mode 100644
index 0000000000..353ddba05a
--- /dev/null
+++ b/downstream/modules/platform/con-gs-automation-mesh.adoc
@@ -0,0 +1,9 @@
+[id="con-gs-automation-mesh"]
+
+= {AutomationMeshStart}
+
+{AutomationMeshStart} is an overlay network intended to ease the distribution of automation across a collection of execution nodes using existing connectivity.
+Execution nodes are where link:https://www.redhat.com/en/topics/automation/what-is-an-ansible-playbook[Ansible Playbooks] are actually executed.
+A node runs an {ExecEnvNameSing} which, in turn, runs the Ansible Playbook.
+{AutomationMeshStart} creates peer-to-peer connections between these execution nodes, increasing the resiliency of your automation workloads to network latency and connection disruptions.
+This also permits more flexible architectures and provides rapid, independent scaling of control and execution capacity.
diff --git a/downstream/modules/platform/con-gs-build-decision-env.adoc b/downstream/modules/platform/con-gs-build-decision-env.adoc
new file mode 100644
index 0000000000..d6ece6b515
--- /dev/null
+++ b/downstream/modules/platform/con-gs-build-decision-env.adoc
@@ -0,0 +1,18 @@
+[id="con-gs-build-decision-env"]
+
+= Build and use a decision environment
+
+{EDAName} includes the `ansible.eda` collection, which contains sample sources, event filters, and rulebooks.
+All collections, Ansible rulebooks, and their dependencies run in a decision environment, which is an image that can be run on either Podman or Kubernetes.
+
+In decision environments, sources, which are typically Python code, are distributed through Ansible collections.
+They inject external events into a rulebook for processing.
+The decision environment consists of the following:
+
+* The Python interpreter
+* The Java Runtime Environment for the Drools rule engine
+* The `ansible-rulebook` Python package
+* The `ansible.eda` collection
+
+You can use the base decision environment and build your own customized decision environments with additional collections and collection dependencies.
+You can build a decision environment by using a Dockerfile, and you can optionally deploy your CA certificate into the image.
diff --git a/downstream/modules/platform/con-gs-config-authentication.adoc b/downstream/modules/platform/con-gs-config-authentication.adoc
new file mode 100644
index 0000000000..f325929040
--- /dev/null
+++ b/downstream/modules/platform/con-gs-config-authentication.adoc
@@ -0,0 +1,9 @@
+[id="con-gs-config-authentication"]
+
+= Configure authentication
+
+After your first login as an administrator, you must configure authentication for your users.
+Depending on your organization's needs and resources, you can either:
+
+* Set up authentication by creating users, teams, and organizations manually.
+* Use an external source such as GitHub to configure authentication for your system.
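Returning to the decision environment build described two modules above: a common way to extend the base image is with an {Builder} definition file. The following is a hedged sketch only; the base image reference and the extra collection are assumptions that you would adapt to your environment, not values mandated by this guide:

[source,yaml]
----
# decision-environment.yml: a hypothetical ansible-builder (version 3)
# definition that layers extra collections onto a base decision environment.
version: 3
images:
  base_image:
    # Placeholder reference; substitute the base decision environment
    # image that is available to your subscription.
    name: registry.redhat.io/ansible-automation-platform-25/de-supported-rhel8:latest
dependencies:
  galaxy:
    collections:
      - ansible.eda          # sample sources, event filters, and rulebooks
      - community.general    # example extra collection for your rulebooks
----

Assuming `ansible-builder` is installed, a command such as `ansible-builder build -f decision-environment.yml -t my-custom-de:latest` would produce an image that you can run with Podman.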
diff --git a/downstream/modules/platform/con-gs-create-automation-content.adoc b/downstream/modules/platform/con-gs-create-automation-content.adoc
new file mode 100644
index 0000000000..2fd6a215c2
--- /dev/null
+++ b/downstream/modules/platform/con-gs-create-automation-content.adoc
@@ -0,0 +1,43 @@
+[id="con-gs-create-automation-content"]
+
+= Create automation content with playbooks
+
+Ansible playbooks are blueprints that tell {PlatformNameShort} what tasks to perform with which devices.
+You can use a playbook to define the automation tasks that you want the platform to run.
+
+== Create a playbook
+
+A playbook contains one or more plays. A basic play contains the following parameters:
+
+* *Name*: a brief description of the overall function of the playbook, which assists in keeping it readable and organized for all users.
+* *Hosts*: identifies the target or targets for Ansible to run against.
+* *Become statements*: this optional statement can be set to `true` or `yes` to enable privilege escalation using a become plugin (such as `sudo`, `su`, `pfexec`, `doas`, `pbrun`, `dzdo`, `ksu`).
+* *Tasks*: this is the list of actions that get executed against each host in the play.
+
+Here is an example of a play in a playbook. You can see the name of the play, the host, and the list of tasks included in the play.
+
+[source,yaml]
+----
+- name: Set Up a Project and Job Template
+  hosts: host.name.ip
+  become: true
+
+  tasks:
+    - name: Create a Project
+      ansible.controller.project:
+        name: Job Template Test Project
+        state: present
+        scm_type: git
+        scm_url: https://github.com/ansible/ansible-tower-samples.git
+
+    - name: Create a Job Template
+      ansible.controller.job_template:
+        name: my-job-1
+        project: Job Template Test Project
+        inventory: Demo Inventory
+        playbook: hello_world.yml
+        job_type: run
+        state: present
+----
+
+For more detailed instructions on authoring playbooks, see link:{LinkDevelopAutomationContent}, or consult our documentation on link:{LinkLightspeedUserGuide} to learn how to generate a playbook with AI assistance.
\ No newline at end of file
diff --git a/downstream/modules/platform/con-gs-dashboard-components.adoc b/downstream/modules/platform/con-gs-dashboard-components.adoc
new file mode 100644
index 0000000000..7266a79f4b
--- /dev/null
+++ b/downstream/modules/platform/con-gs-dashboard-components.adoc
@@ -0,0 +1,39 @@
+[id="con-gs-dashboard-components"]
+
+= Dashboard components
+
+image::AAP_dashboard_2.5.png[Dashboard]
+
+After you install {PlatformNameShort} on your system and log in for the first time, familiarize yourself with the platform dashboard.
+
+Quick starts::
+You can learn about Ansible automation functions with guided tutorials called quick starts.
+In the dashboard, you can access quick starts by selecting a quick start card.
+From the panel displayed, click btn:[Start] and complete the onscreen instructions.
+You can also filter quick starts by keyword and status.
+
+Resource status::
+Indicates the status of your hosts, projects, and inventories.
+The status indicator links to your configured hosts, projects, and inventories, where you can search, filter, add, and change these resources.
+
+Job Activity::
+You can view a summary of your current job status.
+Filter the job status within a period of time or by job type, or click btn:[Go to jobs] to view a complete list of jobs that are currently available.
+
+Jobs::
+You can view recent jobs that have run, or click btn:[View all Jobs] to view a complete list of jobs that are currently available, or create a new job.
+
+Projects::
+You can view recently updated projects or click btn:[View all Projects] to view a complete list of the projects that are currently available, or create a new project.
+
+Inventories::
+You can view recently updated inventories or click btn:[View all Inventories] to view a complete list of available inventories, or create a new inventory.
+
+Rulebook Activations::
+You can view the list of recent rulebook activations and their status, display the complete list of rulebook activations that are currently available, or create a new rulebook activation.
+
+Rule Audit::
+You can view recently fired rule audits, view rule audit records, and view rule audit data based on corresponding rulebook activation runs.
+
+Decision Environments::
+You can view recently updated decision environments, or click btn:[View all Decision Environments] to view a complete list of available decision environments, or create a new decision environment.
diff --git a/downstream/modules/platform/con-gs-define-events-rulebooks.adoc b/downstream/modules/platform/con-gs-define-events-rulebooks.adoc
new file mode 100644
index 0000000000..a02fac24ff
--- /dev/null
+++ b/downstream/modules/platform/con-gs-define-events-rulebooks.adoc
@@ -0,0 +1,25 @@
+[id="con-gs-define-events-rulebooks"]
+
+= Define events with rulebooks
+
+An Ansible rulebook is a collection of rulesets that references one or more sources, rules, and conditions.
+
+Rulebooks are to {EDAName} what playbooks are to {PlatformNameShort} as a whole.
+Like a playbook, a rulebook defines automation tasks for the platform, along with when they should be triggered.
+
+== Rulebook actions
+
+Rulebooks use an “if-this-then-that” logic that tells {EDAName} what actions to activate when a rule is triggered. {EDAName} listens to the controller event stream and, when an event triggers a rule, activates an automation action in response.
+
+Rulebooks can trigger the following actions:
+
+* `run_job_template`
+* `run_playbook` (only supported with ansible-rulebook CLI)
+* `debug`
+* `print_event`
+* `set_fact`
+* `post_event`
+* `retract_fact`
+* `shutdown`
+
+To read more about rulebook actions, see link:https://ansible.readthedocs.io/projects/rulebook/en/latest/actions.html[Actions] in the Ansible Rulebook documentation.
\ No newline at end of file
diff --git a/downstream/modules/platform/con-gs-developer-tools.adoc b/downstream/modules/platform/con-gs-developer-tools.adoc
new file mode 100644
index 0000000000..46e382b356
--- /dev/null
+++ b/downstream/modules/platform/con-gs-developer-tools.adoc
@@ -0,0 +1,6 @@
+[id="con-gs-developer-tools"]
+
+= {ToolsName}
+
+{ToolsName} are an integrated and supported suite of capabilities that help IT practitioners at any skill level generate automation content faster than they might with manual coding.
+{ToolsName} can help you create, test, and deploy automation content like playbooks, {ExecEnvShort}s, and collections quickly and accurately using recommended practices. For more information on how {ToolsName} can help you create automation content, see our documentation on link:{LinkDevelopAutomationContent}.
\ No newline at end of file
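To make the rulebook structure described in the module above concrete, here is a minimal sketch of a single ruleset. The webhook source, port, condition, and job template name are illustrative assumptions, not values from this guide:

[source,yaml]
----
# Hypothetical rulebook: listen for webhook events and run a job template
# when an incoming payload reports an alert.
- name: Respond to monitoring alerts
  hosts: all
  sources:
    - ansible.eda.webhook:      # event source from the ansible.eda collection
        host: 0.0.0.0
        port: 5000
  rules:
    - name: Remediate when an alert fires
      condition: event.payload.status == "alert"
      action:
        run_job_template:       # one of the actions listed above
          name: Remediate web servers
          organization: Default
----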
diff --git a/downstream/modules/platform/con-gs-execution-env.adoc b/downstream/modules/platform/con-gs-execution-env.adoc
new file mode 100644
index 0000000000..117475ee9f
--- /dev/null
+++ b/downstream/modules/platform/con-gs-execution-env.adoc
@@ -0,0 +1,20 @@
+[id="con-gs-execution-env_{context}"]
+
+= Build and use an {ExecEnvShort}
+
+All automation in {PlatformName} runs on container images called {ExecEnvName}.
+
+{ExecEnvNameStart} are consistent and shareable container images that serve as Ansible control nodes.
+{ExecEnvNameStart} reduce the challenge of sharing Ansible content that has external dependencies.
+If automation content is like a script that a developer has written, an automation {ExecEnvShort} is like a replica of that developer's environment, thereby enabling you to reproduce and scale the automation content that the developer has written. In this way, {ExecEnvShort}s make it easier for you to implement automation in a range of environments.
+
+{ExecEnvNameStart} contain:
+
+* Ansible Core
+* {Runner}
+* Ansible Collections
+* Python libraries
+* System dependencies
+* Custom user needs
+
+You can either use the default base {ExecEnvShort} included in your {PlatformNameShort} subscription, or you can define and create an {ExecEnvNameSing} using {Builder}.
\ No newline at end of file
diff --git a/downstream/modules/platform/con-gs-final-set-up.adoc b/downstream/modules/platform/con-gs-final-set-up.adoc
new file mode 100644
index 0000000000..9320c4e150
--- /dev/null
+++ b/downstream/modules/platform/con-gs-final-set-up.adoc
@@ -0,0 +1,13 @@
+[id="con-gs-final-set-up"]
+
+= Using this guide
+
+After you have installed {PlatformNameShort} {PlatformVers} and have become familiar with the dashboard, use this document to explore further options for setup and daily use.
+This guide is structured so that you can select the path that is most appropriate to you and your role within your organization.
+We also encourage you to explore the other paths outlined in this guide to learn how Ansible empowers users with various roles and objectives to build and customize automation tasks.
+
+Select one of the following paths to continue getting started:
+
+* If you are a systems administrator configuring authentication and setting up teams and organizations, see xref:assembly-gs-platform-admin[Getting started as a platform administrator].
+* If you are a developer setting up development environments, creating playbooks, rulebooks, roles, or projects, see xref:assembly-gs-auto-dev[Getting started as an automation developer].
+* If you are an operator using playbooks, publishing custom content, creating projects, and creating and using inventories, see xref:assembly-gs-auto-op[Getting started as an automation operator].
diff --git a/downstream/modules/platform/con-gs-learn-about-collections.adoc b/downstream/modules/platform/con-gs-learn-about-collections.adoc
new file mode 100644
index 0000000000..8cb0fcb9e7
--- /dev/null
+++ b/downstream/modules/platform/con-gs-learn-about-collections.adoc
@@ -0,0 +1,12 @@
+[id="con-gs-learn-about-collections_{context}"]
+
+= About content collections
+
+Ansible content collections are assemblages of automation content. There are two types of Ansible collections:
+
+* *{CertifiedName}*, which contain fully-supported roles and modules that are enterprise- and production-ready for use in your environments.
+* *{Valid} collections*, which provide you with a trusted, expert-guided approach for performing foundational operations and tasks in your product.
+
+Both types of content collections can be found in {HubName} through the link:https://console.redhat.com/ansible/automation-hub/[Hybrid Cloud Console].
+
+
diff --git a/downstream/modules/platform/con-gs-manage-RBAC.adoc b/downstream/modules/platform/con-gs-manage-RBAC.adoc
new file mode 100644
index 0000000000..c243a203a7
--- /dev/null
+++ b/downstream/modules/platform/con-gs-manage-RBAC.adoc
@@ -0,0 +1,13 @@
+[id="con-gs-manage-RBAC"]
+
+= Managing user access with role-based access control
+
+Role-based access control (RBAC) restricts user access based on their role within an organization.
+The roles in RBAC refer to the levels of access that users have to the network.
+
+You can control what users can do with the components of {PlatformNameShort} at a broad or granular level depending on your RBAC policy.
+You can select whether the user is a system administrator or a normal user, and align roles and access permissions with their positions within the organization.
+
+You can define roles with many permissions that can then be assigned to resources, teams, and users.
+The permissions that make up a role dictate what the assigned role allows.
+Permissions are allocated with only the access needed for a user to perform the tasks appropriate for their role.
diff --git a/downstream/modules/platform/con-gs-manage-collections.adoc b/downstream/modules/platform/con-gs-manage-collections.adoc
new file mode 100644
index 0000000000..2516b85eb4
--- /dev/null
+++ b/downstream/modules/platform/con-gs-manage-collections.adoc
@@ -0,0 +1,12 @@
+[id="con-gs-manage-collections"]
+
+= Manage collections in {HubName}
+
+As a platform operator, you can use namespaces in {HubName} to curate and manage collections for the following purposes:
+
+* Create groups with permissions to curate namespaces and upload collections to {PrivateHubName}.
+* Add information and resources to the namespace to help end users of the collection in their automation tasks.
+* Upload collections to the namespace.
+* Review the namespace import logs to determine the success or failure of the collection upload and its current approval status.
+
+For more information about collections, see _link:{LinkHubManagingContent}_.
diff --git a/downstream/modules/platform/con-gs-playbooks.adoc b/downstream/modules/platform/con-gs-playbooks.adoc
new file mode 100644
index 0000000000..11019573d2
--- /dev/null
+++ b/downstream/modules/platform/con-gs-playbooks.adoc
@@ -0,0 +1,12 @@
+[id="con-gs-playbooks"]
+
+= Get started with playbooks
+
+A playbook runs its plays in order from top to bottom. Within each play, tasks also run in order from top to bottom.
+
+== Learn about playbooks
+
+Playbooks with multiple “plays” can orchestrate multi-machine deployments, running one play on your web servers, another play on your database servers, and a third play on your network infrastructure.
+
+For more information, see link:{LinkPlaybooksGettingStarted}.
+
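As a minimal sketch of that top-to-bottom ordering (the host group and message are placeholders), a playbook with a single play might look like this:

[source,yaml]
----
# Illustrative playbook: one play with two tasks that run in order.
- name: Verify connectivity and report
  hosts: webservers
  tasks:
    - name: Check that the hosts are reachable   # runs first
      ansible.builtin.ping:

    - name: Print a status message               # runs second
      ansible.builtin.debug:
        msg: "Host {{ inventory_hostname }} is reachable."
----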
diff --git a/downstream/modules/platform/con-gs-rulebook-activations.adoc b/downstream/modules/platform/con-gs-rulebook-activations.adoc
new file mode 100644
index 0000000000..d0aa36768d
--- /dev/null
+++ b/downstream/modules/platform/con-gs-rulebook-activations.adoc
@@ -0,0 +1,5 @@
+[id="con-gs-rulebook-activations"]
+
+= Create and run a rulebook activation
+
+In {EDAName}, a rulebook activation is a background process in which a decision environment runs a specific rulebook.
diff --git a/downstream/modules/platform/con-gs-setting-up-dev-env.adoc b/downstream/modules/platform/con-gs-setting-up-dev-env.adoc
new file mode 100644
index 0000000000..42ed47e033
--- /dev/null
+++ b/downstream/modules/platform/con-gs-setting-up-dev-env.adoc
@@ -0,0 +1,9 @@
+:_newdoc-version: 2.18.3
+:_template-generated: 2024-09-19
+
+:_mod-docs-content-type: CONCEPT
+
+[id="setting-up-dev-env_{context}"]
+= Setting up your development environment
+
+Before you begin to create content, consult our guide to link:{LinkDevelopAutomationContent}. There you can find information on {ToolsName}, which you can integrate into your environment, and learn how to scaffold a playbook project.
diff --git a/downstream/modules/platform/con-gw-activity-stream.adoc b/downstream/modules/platform/con-gw-activity-stream.adoc
new file mode 100644
index 0000000000..2245e67a6d
--- /dev/null
+++ b/downstream/modules/platform/con-gw-activity-stream.adoc
@@ -0,0 +1,12 @@
+:_mod-docs-content-type: CONCEPT
+
+[id="con-gw-activity-stream"]
+
+= Activity stream
+The {Gateway} includes an activity stream that captures changes to {Gateway} resources, such as the creation or modification of organizations, users, and service clusters, among others. For each change, the activity stream collects information about the time of the change, the user that initiated the change, the action performed, and the actual changes made to the object, when possible. The information gathered varies depending on the type of change.
+
+You can access the details captured by the activity stream from the API:
+
+-----
+/api/gateway/v1/activitystream/
+-----
diff --git a/downstream/modules/platform/con-gw-authenticator-map-examples.adoc b/downstream/modules/platform/con-gw-authenticator-map-examples.adoc
new file mode 100644
index 0000000000..b55b2e5473
--- /dev/null
+++ b/downstream/modules/platform/con-gw-authenticator-map-examples.adoc
@@ -0,0 +1,15 @@
+:_mod-docs-content-type: CONCEPT
+
+[id="gw-authenticator-map-examples"]
+
+= Authenticator map examples
+
+The following are examples of rules that authenticator maps can express:
+
+* Make this user a superuser if they have an attribute called `aap_superuser` with a value of _True_.
+* Add this user to a team if they have the group `cn=Administrators,ou=AAP,ou=example,o=com` or `cn=Operators,ou=AAP,ou=example,o=com`.
+* Never allow access to the system if the user has an attribute called `disabled` with a value of _True_, _Yes_ or _Until Further Notice_.
+
+Since maps are executed in order, it is possible to create exceptions. Expanding on the previous example of “Never allow access to the system if the user has an attribute called `disabled` with a value of _True_, _Yes_ or _Until Further Notice_,” you can add another rule with a higher order, such as “Allow access to the system for a `disabled` user if they are in the group `Emergency Contacts`.”
+
+The first rule prevents the disabled user from accessing the system, but the second rule alters that decision to grant access to the system for the disabled user if they are in the `Emergency Contacts` group.
\ No newline at end of file
diff --git a/downstream/modules/platform/con-gw-authenticator-map-triggers.adoc b/downstream/modules/platform/con-gw-authenticator-map-triggers.adoc
new file mode 100644
index 0000000000..1c5958d2b6
--- /dev/null
+++ b/downstream/modules/platform/con-gw-authenticator-map-triggers.adoc
@@ -0,0 +1,49 @@
+:_mod-docs-content-type: CONCEPT
+
+[id="gw-authenticator-map-triggers"]
+
+= Authenticator map triggers
+
+Each map has a trigger that defines when the map should be evaluated as true. Trigger types include the following:
+
+Always:: The trigger should always be fired.
+Never:: The trigger should never be fired.
+Group:: The map is true or false based on a user having, not having, or having multiple groups in the source system. When defining a group trigger, the authentication mapping expands to include the following selections:
++
+* *Operation:* This field includes conditional settings that trigger the handling of the rule based on the specified *Groups* criteria. The choices include *and* and *or*. For example, if you select *and*, the user logging in must be a member of all of the groups specified in the *Groups* field for this trigger to be true. Alternatively, if you select *or*, the user logging in must be a member of any of the specified *Groups* for the trigger to fire.
++
+[NOTE]
+====
+If you are only keying off one group, it does not matter whether you select *and* or *or*.
+====
++
+* *Groups:* This is a list of one or more groups coming from the authentication system that the user must be a member of. See the *Operation* field to determine the behavior of the trigger if more than one group is specified in the trigger.
++
+[NOTE]
+====
+Group identifiers are case-sensitive and must match the authenticator backend. For example, `cn=johnsmith,dc=example,dc=com` instead of `CN=johnsmith,DC=example,DC=com`.
+====
++
+Attribute:: The map is true or false based on a user's attributes coming from the source system. When defining an attribute trigger, the authentication mapping expands to include the following selections:
++
+* *Operation:* This field includes conditional settings that trigger the handling of the rule based on the specified *Attribute* criteria. In version {PlatformVers}, this field indicates what happens if the source system returns a list of attributes instead of a single value. For example, if the source system returns multiple emails for a user and *Operation* is set to *and*, all of the given emails must match the *Comparison* for the trigger to be _True_. If *Operation* is set to *or*, any of the returned emails sets the trigger to _True_ if it matches the *Comparison* in the trigger.
++
+[NOTE]
+====
+If you want to experiment with multiple attribute maps, you can do so through the API. However, the UI form removes multi-attribute maps if the authenticator is saved through the UI. When adding multiple attributes to a map, the *Operation* also applies to the attributes.
+====
++
+* *Attribute:* The name of the attribute coming from the source system that this trigger is evaluated against. For example, if you wanted the trigger to fire based on the user's last name, and the last name field in the source system was called `users_last_name`, you would enter the value ‘users_last_name’ in this field.
+* *Comparison:* Tells the trigger how to evaluate the value of the user's *Attribute* in the source system compared to the *Value* specified on the trigger.
Available options are: *contains*, *matches*, *ends with*, *in*, or *equals*. Below is a breakdown of each *Comparison* type: ++ +** *contains*: The specified character sequence in *Value* is contained within the attributes value returned from the source. For example, given an attribute value of ‘John’ from the source the contains *Comparison* would set the trigger to _True_ if the trigger *Value* was set to ‘Jo’ and _False_ if the trigger *Value* was ‘Joy’. +** *matches*: The *Value* on the trigger is treated as a python regular expression and does an link:https://docs.python.org/3/library/re.html#re.match[Regular expression match (re.match)] (with case ignore on) between the specified *Value* and the value returned from the source system. For example, if the trigger's *Value* was ‘Jo’ the trigger would return _True_ if the value from the source was ‘John‘ or ‘Joanne‘ or any other value which matched the regular expression ‘Jo’. The trigger would return _False_ if the sources value for the attribute was ‘Dan’ because ‘Dan’ does not match the regular expression ‘Jo’. +** *ends with*: The trigger will see if the value provided by the source ends with the specified *Value* of the trigger. For example, if the source provided a value of ‘John’ the trigger would be _True_ if its *Value* was set to ‘n’ or ‘on’. The trigger would be _False_ if its *Value* was set to ‘z’ because the value ‘John’ coming from the source does not end with the value ’z’ specified by the trigger. +** *equal*: The trigger will see if the value provided by the source is equal to (in its entirety) the specified *Value* of the trigger. For example, if the source returned the value ‘John’, the trigger would be _True_ if its *Value* was set to ‘John’. Any value other than ‘John’ returned from the source would set this trigger to _False_. +** *in*: The *in* condition will see if the value matches one of several values. When *in* is specified as the *Comparison*, the *Value* field can be a comma separated list. For example, if a trigger had a *Value* of ‘John,Donna’ the trigger would be _True_ if the attribute coming from the source had either the value ‘John’ or ‘Donna’. Otherwise, the trigger would be _False_. +** *Value*: The value that a users attribute will be matched against based on the *Comparison* field. See examples in the *Comparison* definition in this section. ++ +[NOTE] +==== +If the *Comparison* type is *in*, this field can be a comma separated list (without spaces). +==== diff --git a/downstream/modules/platform/con-gw-authenticator-map-types.adoc b/downstream/modules/platform/con-gw-authenticator-map-types.adoc new file mode 100644 index 0000000000..20645d38e6 --- /dev/null +++ b/downstream/modules/platform/con-gw-authenticator-map-types.adoc @@ -0,0 +1,15 @@ +:_mod-docs-content-type: CONCEPT + +[id="gw-authenticator-map-types"] + += Authenticator map types + +{PlatformNameShort} supports the following rule types: + +Allow:: Determine if the user is allowed to log into the system. +Organization:: Determine if a user should be put into an organization. +Team:: Determine if the user should be a member of a team. +Role:: Determine if the user is a member of a role (for example, _System Auditor_). +Is Superuser:: Determine if the user is a superuser in the system. + +These authentication map types can be used with any type of authenticator. 
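The *Comparison* semantics described in the authenticator map triggers module can be summarized in a short sketch. This is illustrative only and assumes a single attribute value; it follows the behavior stated above, in particular that *matches* uses `re.match` with case ignored and that *in* treats *Value* as a comma-separated list.

----
import re

def compare(source_value, comparison, trigger_value):
    """Evaluate a trigger Comparison as described in this module."""
    if comparison == "contains":
        return trigger_value in source_value
    if comparison == "matches":
        # Regular expression match anchored at the start, case ignored.
        return re.match(trigger_value, source_value, re.IGNORECASE) is not None
    if comparison == "ends with":
        return source_value.endswith(trigger_value)
    if comparison == "equals":
        return source_value == trigger_value
    if comparison == "in":
        # Value is a comma-separated list (without spaces).
        return source_value in trigger_value.split(",")
    raise ValueError(f"unknown comparison: {comparison}")

print(compare("John", "contains", "Jo"))      # True
print(compare("Joanne", "matches", "Jo"))     # True
print(compare("John", "ends with", "on"))     # True
print(compare("Donna", "in", "John,Donna"))   # True
----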
diff --git a/downstream/modules/platform/con-gw-cache-queue.adoc b/downstream/modules/platform/con-gw-cache-queue.adoc new file mode 100644 index 0000000000..a805d50054 --- /dev/null +++ b/downstream/modules/platform/con-gw-cache-queue.adoc @@ -0,0 +1,25 @@ +:_mod-docs-content-type: CONCEPT + +[id="gw-cache-queue_{context}"] + += Caching and queueing system + +In {PlatformNameShort} {PlatformVers}, link:https://redis.io/[Redis (REmote DIctionary Server)] is used as the caching and queueing system. Redis is an open source, in-memory, NoSQL key/value store that is used primarily as an application cache, quick-response database, and lightweight message broker. + +Centralized Redis is provided for the {Gateway} and {EDAName} and shared between those components. {ControllerNameStart} and {HubName} have their own instances of Redis. + +This cache and queue system stores data in memory, rather than on a disk or solid-state drive (SSD), which helps deliver speed, reliability, and performance. In {PlatformNameShort}, the system caches the following types of data for the various services in {PlatformNameShort}: + +.Data types cached by Centralized Redis +[options="header"] +|==== +| {ControllerNameStart} | {EDAName} server | {HubNameStart} | {GatewayStart} +| N/A. {ControllerName} does not use shared Redis in {PlatformNameShort} {PlatformVers} | Event queues | N/A. {HubName} does not use shared Redis in {PlatformNameShort} {PlatformVers} | Settings, session information, JSON Web Tokens +|==== + +This data can contain sensitive personally identifiable information (PII). Your data is protected by secure communication with the cache and queue system, using both Transport Layer Security (TLS) encryption and authentication. + +[NOTE] +==== +The data in Redis from both the {Gateway} and {EDAName} is partitioned; therefore, neither service can access the other’s data. +==== \ No newline at end of file diff --git a/downstream/modules/platform/con-gw-centralized-redis.adoc b/downstream/modules/platform/con-gw-centralized-redis.adoc new file mode 100644 index 0000000000..bc45bce00e --- /dev/null +++ b/downstream/modules/platform/con-gw-centralized-redis.adoc @@ -0,0 +1,7 @@ +:_mod-docs-content-type: CONCEPT + +[id="gw-centralized-redis_{context}"] + += Centralized Redis + +{PlatformNameShort} offers a centralized Redis instance in both xref:gw-single-node-redis_planning[standalone] and xref:gw-clustered-redis_planning[clustered] topologies. This enables resiliency by providing consistent performance and reliability. \ No newline at end of file diff --git a/downstream/modules/platform/con-gw-clustered-redis.adoc b/downstream/modules/platform/con-gw-clustered-redis.adoc new file mode 100644 index 0000000000..f56db6b267 --- /dev/null +++ b/downstream/modules/platform/con-gw-clustered-redis.adoc @@ -0,0 +1,30 @@ +:_mod-docs-content-type: CONCEPT + +[id="gw-clustered-redis_{context}"] + += Clustered Redis + +With clustered Redis, data is automatically partitioned over multiple nodes to provide performance stability, and nodes are assigned as replicas to provide reliability. Clustered Redis, shared between the {Gateway} and {EDAName}, is provided by default when installing {PlatformNameShort} in containerized and operator-based deployments. + +[NOTE] +==== +Six VMs are required for a Redis high availability (HA) compatible deployment. In RPM deployments, Redis can be colocated on each {PlatformNameShort} component VM except for {ControllerName}, execution nodes, or the PostgreSQL database.
In containerized deployments, Redis can be colocated on any {PlatformNameShort} component VMs of your choice except for execution nodes or the PostgreSQL database. See link:{LinkTopologies} for the opinionated deployment options available. +==== + +A cluster contains three primary nodes, and each primary node has a replica node. + +If a primary instance becomes unavailable due to failures, the other primary nodes initiate a failover state to promote a replica node to a primary node. + +image::gw-clustered-redis.png[Clustered Redis deployment] + +The benefits of deploying clustered Redis over standalone Redis include the following: + +* Data is automatically split across multiple nodes. +* Cluster capacity can be adjusted dynamically. +* Automatic failover of the primary nodes is initiated during system failures. + +Therefore, if you need data scalability and automatic failover, deploy {PlatformNameShort} with a clustered Redis. For more information about scalability with Redis, refer to link:https://redis.io/docs/latest/operate/oss_and_stack/management/scaling/[Scale with Redis Cluster] in the Redis product documentation. + +For information on deploying {PlatformNameShort} with clustered Redis, refer to the link:{LinkInstallationGuide}, link:{LinkContainerizedInstall}, and link:{LinkOperatorInstallation} guides. + +include::../aap-common/external-site-disclaimer.adoc[] \ No newline at end of file diff --git a/downstream/modules/platform/con-gw-create-authentication.adoc b/downstream/modules/platform/con-gw-create-authentication.adoc new file mode 100644 index 0000000000..25ebf22838 --- /dev/null +++ b/downstream/modules/platform/con-gw-create-authentication.adoc @@ -0,0 +1,21 @@ +:_mod-docs-content-type: CONCEPT + +[id="gw-create-authentication"] + += Creating an authentication method + +The *Create Authentication* wizard guides you through the steps to create a new authentication method for your organization. The wizard launches when you begin creating an authentication method. + +Creating an authenticator involves the following procedures: + +. xref:gw-select-auth-type[Authentication type], where you select the type of authenticator plugin you want to configure. +. xref:gw-configure-auth-details[Authentication details], where you configure the authentication details for the plugin you selected. +. xref:gw-define-rules-triggers[Mapping], where you define mapping rule types and triggers to control access to the system. +. xref:gw-adjust-mapping-order[Mapping order], where you can define the mapping precedence. ++ +[NOTE] +==== +Mapping order is only available if you have defined one or more authenticator maps. +==== ++ +. xref:gw-review-auth-settings[Review], where you can review and confirm the authentication settings before creating the authentication method. \ No newline at end of file diff --git a/downstream/modules/platform/con-gw-dash-components.adoc b/downstream/modules/platform/con-gw-dash-components.adoc new file mode 100644 index 0000000000..ce6aeb5b5a --- /dev/null +++ b/downstream/modules/platform/con-gw-dash-components.adoc @@ -0,0 +1,15 @@ +:_mod-docs-content-type: CONCEPT + +[id="gw-dash-components"] + += {PlatformNameShort} dashboard components + +Quick starts:: You can learn about Ansible automation functions with guided tutorials called quick starts. In the dashboard, you can access quick starts by selecting a quick start card. From the panel displayed, click btn:[Start] and complete the onscreen instructions. You can also filter quick starts by keyword and status.
+Resource status:: Indicates the status of your hosts, projects, and inventories. The status indicator links to your configured hosts, projects, and inventories, where you can search, filter, add, and modify these resources. +Job Activity:: You can view a summary of your current job status, filter the job status within a period of time or by job type, or click *Go to Jobs* to view a complete list of jobs that are currently available. +Jobs:: You can view recent jobs that have run, or click *View all Jobs* to view a complete list of jobs that are currently available, or create a new job. +Projects:: You can view recently updated projects or click *View all Projects* to view a complete list of the projects that are currently available, or create a new project. +Inventories:: You can view recently updated inventories or click *View all Inventories* to view a complete list of available inventories, or create a new inventory. +Rulebook Activations:: You can view the list of recent rulebook activations and their status, display the complete list of rulebook activations that are currently available, or create a new rulebook activation. +Rule Audit:: You can view recently fired rule audits, view rule audit records, and view rule audit data based on corresponding rulebook activation runs. +Decision Environments:: You can view recently updated decision environments, or click *View all Decision Environments* to view a complete list of available decision environments, or create a new decision environment. diff --git a/downstream/modules/platform/con-gw-dash-features.adoc b/downstream/modules/platform/con-gw-dash-features.adoc new file mode 100644 index 0000000000..565f17337a --- /dev/null +++ b/downstream/modules/platform/con-gw-dash-features.adoc @@ -0,0 +1,9 @@ +:_mod-docs-content-type: CONCEPT + +[id="con-gw-dash-features"] + += {PlatformNameShort} dashboard features + +The {PlatformNameShort} dashboard provides the following features: + +Manage view:: You can enable, disable, and sort dashboard components so only the features you need are visible on the dashboard. diff --git a/downstream/modules/platform/con-gw-managing-access.adoc b/downstream/modules/platform/con-gw-managing-access.adoc new file mode 100644 index 0000000000..91c2473867 --- /dev/null +++ b/downstream/modules/platform/con-gw-managing-access.adoc @@ -0,0 +1,11 @@ +:_mod-docs-content-type: CONCEPT + +[id="gw-managing-access"] + += Managing access with role-based access control + +Role-based access control (RBAC) restricts user access based on their role within an organization. The roles in RBAC refer to the levels of access that users have to the network. + +You can control what users can do with the components of {PlatformNameShort} at a broad or granular level depending on your RBAC policy. You can designate whether the user is a system administrator or a normal user and align roles and access permissions with their positions within the organization. + +Roles can be defined with multiple permissions that can then be assigned to resources, teams, and users. The permissions that make up a role dictate what the assigned role allows. Permissions are allocated with only the access needed for a user to perform the tasks appropriate for their role.
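As a rough mental model of the role and permission relationship described above, consider the following sketch. The class names, permission strings, and check logic are hypothetical and simplified; they are not the platform's actual data model.

----
from dataclasses import dataclass, field

@dataclass
class Role:
    """A named bundle of permissions (hypothetical model)."""
    name: str
    permissions: set = field(default_factory=set)

@dataclass
class Assignment:
    """Grants a role to a user or team for a specific resource."""
    role: Role
    subject: str   # a user or team name
    resource: str  # for example, an inventory name

def is_allowed(assignments, subject, resource, permission):
    """A subject can act only if an assignment grants the permission."""
    return any(
        a.subject == subject
        and a.resource == resource
        and permission in a.role.permissions
        for a in assignments
    )

auditor = Role("auditor", {"read"})
assignments = [Assignment(auditor, "jdoe", "prod-inventory")]
print(is_allowed(assignments, "jdoe", "prod-inventory", "read"))   # True
print(is_allowed(assignments, "jdoe", "prod-inventory", "write"))  # False
----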
diff --git a/downstream/modules/platform/con-gw-overview-access-auth.adoc b/downstream/modules/platform/con-gw-overview-access-auth.adoc new file mode 100644 index 0000000000..d0cd4c934b --- /dev/null +++ b/downstream/modules/platform/con-gw-overview-access-auth.adoc @@ -0,0 +1,19 @@ +:_mod-docs-content-type: CONCEPT + +[id="gw-overview-access-auth"] + += Overview of access management and authentication + +{PlatformNameShort} features a platform interface where you can set up centralized authentication, configure access management, and configure global and system-level settings from a single location. + +The first time you log in to {PlatformNameShort}, you must enter your subscription information to activate the platform. For more information about licensing and subscriptions, refer to xref:assembly-gateway-licensing[Managing {PlatformNameShort} licensing, updates and support]. + +A system administrator can configure access, permissions, and system settings through the following tasks: + +* xref:gw-configure-authentication[Configuring authentication in the {PlatformNameShort}], where you set up simplified login for users by selecting from the available authentication methods, and define permissions and assign them to users with authenticator maps. + +* xref:gw-token-based-authentication[Configuring access to external applications with token-based authentication], where you can configure authentication of third-party tools and services with the platform through integrated OAuth 2 token support. + +* xref:gw-managing-access[Managing access with role-based access control], where you configure user access based on their role within a platform organization. + +* xref:assembly-gw-settings[Configuring {PlatformNameShort}], where you can configure global and system-level settings for the platform and services. \ No newline at end of file diff --git a/downstream/modules/platform/con-gw-pluggable-authentication.adoc b/downstream/modules/platform/con-gw-pluggable-authentication.adoc new file mode 100644 index 0000000000..75f68f8961 --- /dev/null +++ b/downstream/modules/platform/con-gw-pluggable-authentication.adoc @@ -0,0 +1,17 @@ +:_mod-docs-content-type: CONCEPT + +[id="gw-pluggable-authentication"] + += Pluggable authentication + +Authentication is the process of verifying a user's identity to {PlatformNameShort} (that is, establishing that a user is who they say they are). This can be done in a number of ways, but is traditionally associated with a `username` and `password`. + +{PlatformNameShort} {PlatformVers} uses a pluggable authentication system with a configuration wizard that provides a common, simplified method of configuring different types of authenticators, such as LDAP and SAML. The pluggable system also allows you to configure multiple authenticators of the same type. + +The pluggable system involves the following concepts: + +Authenticator Plugin:: A plugin allows {PlatformNameShort} to connect to a source system, such as LDAP or SAML. {PlatformNameShort} includes a variety of authenticator plugins. Authenticator plugins are similar to Ansible collections, in that all of the required code is in a package and can be versioned independently if needed. + +Authenticator:: An authenticator is an instantiation of an authenticator plugin and allows users from the specified source to log in. For example, the LDAP authenticator plugin defines a required LDAP server setting.
When you instantiate an authenticator from the LDAP authentication plugin, you must provide the URL of the LDAP server that the authenticator needs to connect to. + +Authenticator Map:: Authenticator maps are applied to authenticators and tell {PlatformNameShort} what permissions to give a user logging into the system. \ No newline at end of file diff --git a/downstream/modules/platform/con-gw-roles.adoc b/downstream/modules/platform/con-gw-roles.adoc new file mode 100644 index 0000000000..83ec9df0c2 --- /dev/null +++ b/downstream/modules/platform/con-gw-roles.adoc @@ -0,0 +1,7 @@ +:_mod-docs-content-type: CONCEPT + +[id="con-gw-roles"] + += Roles + +Roles are units of organization in {PlatformName}. When you assign a role to a team or user, you are granting access to use, read, or write credentials. Because of the file structure associated with a role, roles become redistributable units that enable you to share behavior among resources, or with other users. All access that is granted to use, read, or write credentials is handled through roles, and roles are defined for a resource. diff --git a/downstream/modules/platform/con-gw-single-node-redis.adoc b/downstream/modules/platform/con-gw-single-node-redis.adoc new file mode 100644 index 0000000000..8c9fb5f964 --- /dev/null +++ b/downstream/modules/platform/con-gw-single-node-redis.adoc @@ -0,0 +1,13 @@ +:_mod-docs-content-type: CONCEPT + +[id="gw-single-node-redis_{context}"] + +//[ddacosta] - changed from single-node to standalone to align with "in" product terminology + += Standalone Redis + +Standalone Redis consists of a simple architecture that is easy to deploy and configure. + +image::gw-single-node-redis.png[Standalone Redis deployment] + +If a resilient solution is not a requirement, deploy {PlatformNameShort} with a standalone Redis. diff --git a/downstream/modules/platform/con-ha-hub-installation.adoc b/downstream/modules/platform/con-ha-hub-installation.adoc index 6ef4574657..60010ba9fa 100644 --- a/downstream/modules/platform/con-ha-hub-installation.adoc +++ b/downstream/modules/platform/con-ha-hub-installation.adoc @@ -5,7 +5,7 @@ Use the following examples to populate the inventory file to install a highly available {HubName}. This inventory file includes a highly available {HubName} with a clustered setup. //dcdacosta - include a link to the RHSSO content once it's added. -You can configure your HA deployment further to implement {RHSSO} and enable a xref:proc-install-ha-hub-selinux[high availability deployment of {HubName} on SELinux]. +You can configure your HA deployment further to enable a xref:proc-install-ha-hub-selinux[high availability deployment of {HubName} on SELinux]. .Specify database host IP @@ -26,9 +26,9 @@ automationhub_pg_port=5432 * If installing a clustered setup, replace `localhost ansible_connection=local` in the [automationhub] section with the hostname or IP of all instances.
For example: ----- [automationhub] -automationhub1.testing.ansible.com ansible_user=cloud-user ansible_host=192.0.2.18 -automationhub2.testing.ansible.com ansible_user=cloud-user ansible_host=192.0.2.20 -automationhub3.testing.ansible.com ansible_user=cloud-user ansible_host=192.0.2.22 +automationhub1.testing.ansible.com ansible_user=cloud-user +automationhub2.testing.ansible.com ansible_user=cloud-user +automationhub3.testing.ansible.com ansible_user=cloud-user ----- [role="_additional-resources"] diff --git a/downstream/modules/platform/con-hs-eda-controller.adoc b/downstream/modules/platform/con-hs-eda-controller.adoc new file mode 100644 index 0000000000..4121723ba5 --- /dev/null +++ b/downstream/modules/platform/con-hs-eda-controller.adoc @@ -0,0 +1,39 @@ +[id="con-hs-eda-controller"] + += Horizontal scaling in {EDAcontroller} + +With {EDAcontroller}, you can set up horizontal scaling for your event automation. This multi-node deployment enables you to define as many nodes as you prefer during the installation process. You can also increase or decrease the number of nodes at any time according to your organizational needs. + +The following node types are used in this deployment: + +API node type:: Responds to the HTTP REST API of {EDAcontroller}. +Worker node type:: Runs an {EDAName} worker, which is the component of {EDAName} that not only manages projects and activations, but also executes the activations themselves. +Hybrid node type:: A combination of the API node and the worker node. + +// This content is used in RPM installation +ifdef::aap-install[] +The following example shows how you can set up an inventory file for horizontal scaling of {EDAcontroller} on {RHEL} VMs using the host group name `[automationedacontroller]` and the node type variable `eda_node_type`: + +----- +[automationedacontroller] + +3.88.116.111 routable_hostname=automationedacontroller-api.example.com eda_node_type=api + +# worker node +3.88.116.112 routable_hostname=automationedacontroller-worker.example.com eda_node_type=worker +----- +endif::aap-install[] + +// This content is used in Containerized installation +ifdef::container-install[] +The following example shows how you can set up an inventory file for horizontal scaling of {EDAcontroller} on {RHEL} VMs using the host group name `[automationeda]` and the node type variable `eda_type`: + +----- +[automationeda] + +3.88.116.111 routable_hostname=automationeda-api.example.com eda_type=api + +# worker node +3.88.116.112 routable_hostname=automationeda-worker.example.com eda_type=worker +----- +endif::container-install[] \ No newline at end of file diff --git a/downstream/modules/platform/con-hs-eda-sizing-scaling.adoc b/downstream/modules/platform/con-hs-eda-sizing-scaling.adoc new file mode 100644 index 0000000000..29a0221478 --- /dev/null +++ b/downstream/modules/platform/con-hs-eda-sizing-scaling.adoc @@ -0,0 +1,11 @@ +[id="con-hs-eda-sizing-scaling"] + += Sizing and scaling guidelines + +API nodes process user requests (interactions with the UI or API), while worker nodes process the activations and other background tasks required for {EDAName} to function properly. The number of API nodes you require correlates to the desired number of users of the application, and the number of worker nodes correlates to the desired number of activations you want to run.
+ +Because activations are variable and are handled by worker nodes, the supported approach for scaling is to use separate API and worker nodes instead of hybrid nodes, because worker nodes allocate hardware resources more efficiently. By separating the nodes, you can scale each type independently based on specific needs, leading to better resource utilization and cost efficiency. + +For example, you might consider scaling up your node deployment when you want to deploy {EDAName} for a small group of users who run a large number of activations. In this case, one API node is adequate, but if you require more capacity, you can scale up to three additional worker nodes. + +To set up a multi-node deployment, follow the procedure in xref:proc-hs-eda-setup[Setting up horizontal scaling for {EDAcontroller}]. diff --git a/downstream/modules/platform/con-install-mesh.adoc b/downstream/modules/platform/con-install-mesh.adoc index e8b17a463d..eb990f453e 100644 --- a/downstream/modules/platform/con-install-mesh.adoc +++ b/downstream/modules/platform/con-install-mesh.adoc @@ -2,10 +2,10 @@ = {AutomationMesh} Installation -You use the {PlatformNameShort} installation program to set up {AutomationMesh} or to upgrade to {AutomationMesh}. -To provide {PlatformNameShort} with details about the nodes, groups, and peer relationships in your mesh network, you define them in an the `inventory` file in the installer bundle. +For a VM-based install of {PlatformNameShort}, you use the installation program to set up {AutomationMesh} or to upgrade to {AutomationMesh}. +To provide {PlatformNameShort} with details about the nodes, groups, and peer relationships in your mesh network, you define them in the `inventory` file in the installer bundle. For managed cloud, OpenShift, or operator environments, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_mesh_for_managed_cloud_or_operator_environments/index[{AutomationMeshStart} for managed cloud or operator environments]. [role="_additional-resources"] .Additional Resources -* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/index[{PlatformName} Installation Guide] -* <> +* link:{URLInstallationGuide}/index[{TitleInstallationGuide}] +* xref:design-patterns[Automation mesh design patterns] diff --git a/downstream/modules/platform/con-install-scenario-recommendations.adoc b/downstream/modules/platform/con-install-scenario-recommendations.adoc index 578daf6b11..6476df269b 100644 --- a/downstream/modules/platform/con-install-scenario-recommendations.adoc +++ b/downstream/modules/platform/con-install-scenario-recommendations.adoc @@ -5,14 +5,11 @@ [role="_abstract"] Before selecting your installation method for {PlatformNameShort}, review the following recommendations. Familiarity with these recommendations will streamline the installation process. -* For {PlatformName} or {HubName}: Add an {HubName} host in the `[automationhub]` group. // Removed for AAP-20847 and until such time as a decision is made regarding database support. //* Internal databases `[database]` are not supported. See the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/containerized_ansible_automation_platform_installation_guide/index[Containerized {PlatformName} Installation Guide] for further information on using the containerized installer for environments requiring a monolithic deployment.
-* Do not install {ControllerName} and {HubName} on the same node for versions of {PlatformNameShort} in a production or customer environment. -This can cause contention issues and heavy resource use. -* Provide a reachable IP address or fully qualified domain name (FQDN) for the `[automationhub]` and `[automationcontroller]` hosts to ensure users can sync and install content from {HubName} from a different node. +* Provide a reachable IP address or fully qualified domain name (FQDN) for hosts to ensure users can sync and install content from {HubName} from a different node. + -The FQDN must not contain either the `-` or the `_` symbols, as it will not be processed correctly. +The FQDN must not contain either the `-` or the `_` symbols, as it will not be processed correctly. + Do not use `localhost`. * `admin` is the default user ID for the initial log in to {PlatformNameShort} and cannot be changed in the inventory file. diff --git a/downstream/modules/platform/con-known-proxies.adoc b/downstream/modules/platform/con-known-proxies.adoc index 052f2c8722..dc6235b4e6 100644 --- a/downstream/modules/platform/con-known-proxies.adoc +++ b/downstream/modules/platform/con-known-proxies.adoc @@ -10,8 +10,16 @@ When {ControllerName} is configured with `REMOTE_HOST_HEADERS = ['HTTP_X_FORWARD If {ControllerName} is reachable without use of the proxy/load balancer, or if the proxy does not validate the header, the value of `X-Forwarded-For` can be falsified to fake the originating IP addresses. Using `HTTP_X_FORWARDED_FOR` in the `REMOTE_HOST_HEADERS` setting poses a vulnerability. -To avoid this, you can configure a list of known proxies that are allowed using the *PROXY_IP_ALLOWED_LIST* field in the settings menu on your {ControllerName}. +To avoid this, you can configure a list of known proxies that are allowed. + +.Procedure +. From the navigation panel, select {MenuSetSystem}. +. In the *Proxy IP Allowed List* field, enter the list of proxy IP addresses from which the service should trust custom remote header values. ++ +[NOTE] +==== Requests from load balancers and hosts that are not on the known proxies list are rejected. +==== //.Example vulnerabilities: // diff --git a/downstream/modules/platform/con-ocp-supported-install.adoc b/downstream/modules/platform/con-ocp-supported-install.adoc index 53c4b4d571..df210c6dd6 100644 --- a/downstream/modules/platform/con-ocp-supported-install.adoc +++ b/downstream/modules/platform/con-ocp-supported-install.adoc @@ -1,15 +1,40 @@ -[id="con-ocp-supported-install_{context}"] +[id="ocp-supported-install_{context}"] = Supported installation scenarios for {OCP} -You can use the OperatorHub on the {OCP} web console to install {OperatorPlatform}. +You can use the OperatorHub on the {OCP} web console to install {OperatorPlatformNameShort}. -Alternatively, you can install {OperatorPlatform} from the {OCPShort} command-line interface (CLI), `oc`. +Alternatively, you can install {OperatorPlatformNameShort} from the {OCPShort} command-line interface (CLI), `oc`. See xref:installing-aap-operator-cli_operator-platform-doc[Installing {OperatorPlatformName} from the {OCPShort} CLI] for help with this. -Follow one of the workflows below to install the {OperatorPlatform} and use it to install the components of {PlatformNameShort} that you require. +After you have installed {OperatorPlatformNameShort}, you must create an *{PlatformNameShort}* custom resource (CR).
This enables you to manage {PlatformNameShort} components from a single unified interface known as the {Gateway}. As of version 2.5, you must create an {PlatformNameShort} CR, even if you have existing {ControllerName}, {HubName}, or {EDAName} components. -* {ControllerNameStart} custom resources first, then {HubName} custom resources; -* {HubNameStart} custom resources first, then {ControllerName} custom resources; -* {ControllerNameStart} custom resources; -* {HubNameStart} custom resources. +If existing components have already been deployed, you must specify these components on the {PlatformNameShort} CR. You must create the custom resource in the same namespace as the existing components. + +[cols=2*a,options="header"] +|=== +| *Supported scenarios* | *Supported scenarios with existing components* +| +* {PlatformNameShort} CR for blank slate install with {ControllerName}, {HubName}, and {EDAName} enabled + +* {PlatformNameShort} CR with just {ControllerName} enabled + +* {PlatformNameShort} CR with just {ControllerName} and {HubName} enabled + +* {PlatformNameShort} CR with just {ControllerName} and {EDAName} enabled + | + * {PlatformNameShort} CR created in the same namespace as an existing {ControllerName} CR with the {ControllerName} name specified on the {PlatformNameShort} CR spec + +* Same with {ControllerName} and {HubName} + +* Same with {ControllerName}, {HubName}, and {EDAName} + +* Same with {ControllerName} and {EDAName} +|=== + + +//Commenting out as upgrade is not included in EA [gmurray] +//[NOTE] +//==== +//The stand-alone EDA user interface will not work upon upgrade. After you configure {PlatformNameShort}, other stand-alone user interfaces will not work. +//==== diff --git a/downstream/modules/platform/con-operator-ansible-verbosity.adoc b/downstream/modules/platform/con-operator-ansible-verbosity.adoc new file mode 100644 index 0000000000..98c555ae09 --- /dev/null +++ b/downstream/modules/platform/con-operator-ansible-verbosity.adoc @@ -0,0 +1,44 @@ +[id="con-operator-ansible-verbosity_{context}"] + += Ansible verbosity + +Setting the verbosity of the `ansible-runner` command controls the output detail of `ansible-playbook`. The verbosity ranges from 0 (minimal output) to 7 (maximum debugging). + +{OperatorPlatform} users and admins can set the Ansible verbosity by setting the "ansible.sdk.operatorframework.io/verbosity" annotation on the Custom Resource. + +.Example +For a database operator with `MongoDB` and `PostgreSQL` in the `db.example.com` group, you can set a higher verbosity for `MongoDB` to debug it. The operator container’s spec in the `config/manager/manager.yaml` would look like this: + +---- +- name: manager + image: "quay.io/example/database-operator:v1.0.0" + imagePullPolicy: "Always" + args: + # This value applies to all GVKs specified in watches.yaml + # that are not overridden by environment variables. + - "--ansible-verbosity" + - "1" + env: + # Override the verbosity for the MongoDB kind + - name: ANSIBLE_VERBOSITY_MONGODB_DB_EXAMPLE_COM + value: "4" +---- + +After the {OperatorPlatform} is deployed, the only way to change the verbosity is through the "ansible.sdk.operatorframework.io/verbosity" annotation.
Continuing with the above example, the Custom Resource might look like the following: + +---- +apiVersion: automationcontroller.ansible.com/v1beta1 +kind: AutomationController +metadata: + annotations: + "ansible.sdk.operatorframework.io/verbosity": "5" + creationTimestamp: '2024-10-02T12:24:35Z' + generation: 3 + labels: + app.kubernetes.io/component: automationcontroller + app.kubernetes.io/managed-by: automationcontroller-operator + app.kubernetes.io/operator-version: '2.5' + +spec: + +---- \ No newline at end of file diff --git a/downstream/modules/platform/con-operator-channel-upgrade.adoc b/downstream/modules/platform/con-operator-channel-upgrade.adoc new file mode 100644 index 0000000000..6157874f79 --- /dev/null +++ b/downstream/modules/platform/con-operator-channel-upgrade.adoc @@ -0,0 +1,50 @@ +[id="operator-channel-upgrade_{context}"] + += Channel upgrades + +Upgrading to version 2.5 from {PlatformNameShort} 2.4 involves retrieving updates from a “channel”. +A channel refers to a location where you can access your update. +You select the channel in the OpenShift console UI. + +image::change_subscription.png[Update channel] + +== In-channel upgrades + +Most upgrades occur within a channel as follows: + +. A new update becomes available in the marketplace, through the `redhat-operator` CatalogSource. +. The system automatically creates a new InstallPlan for your {PlatformNameShort} subscription. +* If set to *Manual*, the InstallPlan needs manual approval in the OpenShift UI. +* If set to *Automatic*, it upgrades as soon as the new version is available. ++ +[NOTE] +==== +Set a manual install strategy on your {OperatorPlatformNameShort} subscription during installation or upgrade. You will be prompted to approve upgrades when available in your chosen update channel. Stable channels, like stable-2.5, are available for each X.Y release. +==== ++ +. A new subscription, CSV, and operator containers are created alongside the old ones. +The old resources are cleaned up after a successful install. + +== Cross-channel upgrades + +Upgrading between X.Y channels is always manual and intentional. +Stable channels for major and minor versions are in the Operator Catalog. +Currently, only version 2.x is available, so there are few channels. +It is recommended to stay on the latest minor version channel for the latest patches. + +If the subscription is set for manual upgrades, you must approve the upgrade in the UI. Then, the system upgrades the Operator to the latest version in that channel. + +[NOTE] +==== +It is recommended to set a manual install strategy on your {OperatorPlatformNameShort} subscription during installation or upgrade. +You will be prompted to approve upgrades when they become available in your chosen update channel. +Stable channels, such as stable-2.5, are available for each X.Y release. +==== + +The containers provided in the latest channel are updated regularly for OS upgrades and critical fixes. This allows customers to receive critical patches and CVE fixes faster. Larger changes and new features are saved for minor and major releases. + +For each major or minor version channel, there is a corresponding "cluster-scoped" channel available. Cluster-scoped channels deploy operators that can manage all namespaces, while non-cluster-scoped channels can only manage resources in their own namespace. + +[IMPORTANT] +==== +Cluster-scoped bundles are not compatible with namespace-scoped bundles.
Do not try to switch between normal (stable-2.4 for example) channels and cluster-scoped (stable-2.4-cluster-scoped) channels, as this is not supported. +==== \ No newline at end of file diff --git a/downstream/modules/platform/con-operator-custom-resources.adoc b/downstream/modules/platform/con-operator-custom-resources.adoc index a4c3a41dac..ad1cb569db 100644 --- a/downstream/modules/platform/con-operator-custom-resources.adoc +++ b/downstream/modules/platform/con-operator-custom-resources.adoc @@ -3,3 +3,13 @@ = Custom resources You can define custom resources for each primary installation workflow. + +//[Jameria] Moved this topic from supported installation section to custom resources since that's what the cross-referenced topic links to in the appendix (Custom resources appendix) +== Modifying the number of simultaneous rulebook activations during or after {EDAcontroller} installation + +* If you plan to install {EDAName} on {OCPShort} and modify the number of simultaneous rulebook activations, add the required `EDA_MAX_RUNNING_ACTIVATIONS` parameter to your custom resources. By default, {EDAcontroller} allows 12 activations per node to run simultaneously. See the example in appendix link:{URLOperatorInstallation}/appendix-operator-crs_performance-considerations#eda_max_running_activations[EDA_MAX_RUNNING_ACTIVATIONS]. + +[NOTE] +==== +`EDA_MAX_RUNNING_ACTIVATIONS` for {OCPShort} is a global value since there is no concept of worker nodes when installing {EDAName} on {OCPShort}. +==== diff --git a/downstream/modules/platform/con-operator-upgrade-considerations.adoc b/downstream/modules/platform/con-operator-upgrade-considerations.adoc index 646d66df6f..c09007d07d 100644 --- a/downstream/modules/platform/con-operator-upgrade-considerations.adoc +++ b/downstream/modules/platform/con-operator-upgrade-considerations.adoc @@ -2,11 +2,9 @@ = Upgrade considerations +If you are upgrading from version 2.4, continue to xref:upgrading-operator_{context}[Upgrading the {OperatorPlatformNameShort}]. -[role="_abstract"] -{PlatformName} version 2.0 was the first release of the {OperatorPlatform}. If you are upgrading from version 2.0, continue to the xref:upgrading-operator_operator-upgrade[Upgrading the {OperatorPlatform}] procedure. - -If you are using a version of {OCPShort} that is not supported by the version of {PlatformName} to which you are upgrading, you must upgrade your {OCPShort} cluster to a supported version prior to upgrading. +If your {OCPShort} version is not supported by the {PlatformName} version you are upgrading to, you must upgrade your {OCPShort} cluster to a supported version first. Refer to the link:https://access.redhat.com/support/policy/updates/ansible-automation-platform[Red Hat Ansible Automation Platform Life Cycle] to determine the {OCPShort} version needed. diff --git a/downstream/modules/platform/con-operator-upgrade-overview.adoc b/downstream/modules/platform/con-operator-upgrade-overview.adoc new file mode 100644 index 0000000000..3cbbba03da --- /dev/null +++ b/downstream/modules/platform/con-operator-upgrade-overview.adoc @@ -0,0 +1,20 @@ +[id="operator-upgrade-overview"] + += Overview +You can use this document for help with upgrading {PlatformNameShort} 2.4 to 2.5 on {OCP}. +It also applies to upgrades from {PlatformNameShort} 2.5 to later versions of 2.5. + +The {OperatorPlatformNameShort} manages deployments, upgrades, backups, and restores of {ControllerName} and {HubName}.
+It also handles deployments of AnsibleJob and JobTemplate resources from the {PlatformNameShort} Resource Operator. + +Each operator version has default {ControllerName} and {HubName} versions. +When the operator is upgraded, it also upgrades the {ControllerName} and {HubName} deployments it manages, unless overridden in the spec. + +OpenShift deployments of {PlatformNameShort} use the built-in Operator Lifecycle Management (OLM) functionality. +For more information, see link:https://docs.openshift.com/container-platform/4.16/operators/understanding/olm/olm-understanding-olm.html[Operator Lifecycle Manager concepts and resources]. +OpenShift does this by using Subscription, CSV, InstallPlan, and OperatorGroup objects. +Most users will not have to interact directly with these resources. +They are created when the {OperatorPlatformNameShort} is installed from *OperatorHub* and managed through the *Subscriptions* tab in the OpenShift console UI. +For more information, refer to link:https://docs.openshift.com/container-platform/4.16/web_console/web-console.html[Accessing the web console]. + +image::Subscription_tab.png[Subscription tab] \ No newline at end of file diff --git a/downstream/modules/platform/con-operator-upgrade-prereq.adoc b/downstream/modules/platform/con-operator-upgrade-prereq.adoc index 60e7b3b0a4..5dc60d4335 100644 --- a/downstream/modules/platform/con-operator-upgrade-prereq.adoc +++ b/downstream/modules/platform/con-operator-upgrade-prereq.adoc @@ -3,9 +3,8 @@ = Prerequisites -[role="_abstract"] -To upgrade to a newer version of {OperatorPlatform}, it is recommended that you do the following: +To upgrade to a newer version of {OperatorPlatformNameShort}, you must: -* Create AutomationControllerBackup and AutomationHubBackup objects. For help with this see link:{BaseURL}red_hat_ansible_automation_platform/{PlatformVers}/html-single/red_hat_ansible_automation_platform_operator_backup_and_recovery_guide/index#aap-backup-recommendations[Creating Red Hat Ansible Automation Platform backup resources] -//See (Backup and Restore) for information on creating backup objects. [add link to new backup and restore doc when complete] -* Review the release notes for the new {PlatformNameShort} version to which you are upgrading and any intermediate versions. +* Create AutomationControllerBackup and AutomationHubBackup objects. For help with this, see link:{URLOperatorBackup}[{TitleOperatorBackup}]. +* Review the link:{URLReleaseNotes}[{TitleReleaseNotes}] for the new {PlatformNameShort} version to which you are upgrading and any intermediate versions. +* Determine the type of upgrade you want to perform. See the xref:operator-channel-upgrade_{context}[Channel upgrades] section for more information. diff --git a/downstream/modules/platform/con-resource-operator-overview.adoc b/downstream/modules/platform/con-resource-operator-overview.adoc index f3c0312fe6..cf9bdea188 100644 --- a/downstream/modules/platform/con-resource-operator-overview.adoc +++ b/downstream/modules/platform/con-resource-operator-overview.adoc @@ -1,7 +1,7 @@ [id="con-controller-resource-operator_{context}"] = {OperatorResourceShort} overview -{OperatorResourceShort} is a custom resource (CR) that you can deploy after you have created your {ControllerName} deployment. +{OperatorResourceShort} is a custom resource (CR) that you can deploy after you have created your {Gateway} deployment. With {OperatorResourceShort} you can define projects, job templates, and inventories through the use of YAML files.
These YAML files are then used by {ControllerName} to create these resources. You can create the YAML through the *Form view* that prompts you for keys and values for your YAML code. diff --git a/downstream/modules/platform/con-sticky-sessions.adoc b/downstream/modules/platform/con-sticky-sessions.adoc index da473b1216..7adb5647f2 100644 --- a/downstream/modules/platform/con-sticky-sessions.adoc +++ b/downstream/modules/platform/con-sticky-sessions.adoc @@ -3,4 +3,4 @@ = Enable sticky sessions [role="_abstract"] -By default, an Application Load Balancer routes each request independently to a registered target based on the chosen load-balancing algorithm. To avoid authentication errors when running multiple instances of {HubName} behind a load balancer, you must enable sticky sessions. Enabling sticky sessions sets a custom application cookie that matches the cookie configured on the load balancer to enable stickiness. This custom cookie can include any of the cookie attributes required by the application. +By default, an application load balancer routes each request independently to a registered target based on the chosen load-balancing algorithm. To avoid authentication errors when running multiple instances of {HubName} behind a load balancer, you must enable sticky sessions. Enabling sticky sessions sets a custom application cookie that matches the cookie configured on the load balancer to enable stickiness. This custom cookie can include any of the cookie attributes required by the application. diff --git a/downstream/modules/platform/con-storage-options-for-operator-installation-on-ocp.adoc b/downstream/modules/platform/con-storage-options-for-operator-installation-on-ocp.adoc index 09bee5434a..62352bfa29 100644 --- a/downstream/modules/platform/con-storage-options-for-operator-installation-on-ocp.adoc +++ b/downstream/modules/platform/con-storage-options-for-operator-installation-on-ocp.adoc @@ -1,11 +1,11 @@ [id="con-storage-options-for-operator-installation-on-ocp_{context}"] -= Storage options for {OperatorPlatform} installation on {OCP} += Storage options for {OperatorPlatformNameShort} installation on {OCP} {HubNameStart} requires `ReadWriteMany` file-based storage, Azure Blob storage, or Amazon S3-compliant storage for operation so that multiple pods can access shared content, such as collections. The process for configuring object storage on the `AutomationHub` CR is similar for Amazon S3 and Azure Blob Storage. -If you are using file-based storage and your installation scenario includes {HubName}, ensure that the storage option for {OperatorPlatform} is set to `ReadWriteMany`. +If you are using file-based storage and your installation scenario includes {HubName}, ensure that the storage option for {OperatorPlatformNameShort} is set to `ReadWriteMany`. `ReadWriteMany` is the default storage option. In addition, {ODFShort} provides a `ReadWriteMany` or S3-compliant implementation. You can also set up NFS storage configuration to support `ReadWriteMany`. However, this introduces the NFS server as a potential single point of failure.
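For file-based storage, the `ReadWriteMany` requirement translates into a persistent volume claim that multiple pods can mount simultaneously. The following sketch creates such a claim with the `kubernetes` Python client; the namespace, claim name, size, and storage class are all illustrative assumptions, not values mandated by {PlatformNameShort}.

----
from kubernetes import client, config

# Assumes a valid kubeconfig; use load_incluster_config() inside a pod.
config.load_kube_config()

pvc = {
    "apiVersion": "v1",
    "kind": "PersistentVolumeClaim",
    "metadata": {"name": "hub-file-storage"},  # hypothetical claim name
    "spec": {
        # ReadWriteMany lets several hub pods share the same content.
        "accessModes": ["ReadWriteMany"],
        "resources": {"requests": {"storage": "10Gi"}},
        # Hypothetical RWX-capable storage class, for example one backed
        # by {ODFShort} or an NFS provisioner.
        "storageClassName": "example-rwx-storage-class",
    },
}

client.CoreV1Api().create_namespaced_persistent_volume_claim(
    namespace="aap", body=pvc
)
----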
diff --git a/downstream/modules/platform/con-update-planning.adoc b/downstream/modules/platform/con-update-planning.adoc new file mode 100644 index 0000000000..f06bc3aad7 --- /dev/null +++ b/downstream/modules/platform/con-update-planning.adoc @@ -0,0 +1,10 @@ +[id="con-update-planning"] += Update planning + +Before you begin the update process, review the following considerations to plan and prepare your {PlatformNameShort} deployment: + +* Even if you have a valid license from an earlier version, you must provide your credentials or a subscription manifest when upgrading to the latest version of {PlatformNameShort}. For more information, see link:{URLCentralAuth}/assembly-gateway-licensing#proc-attaching-subscriptions[Attaching your {PlatformName} subscription] in _{TitleCentralAuth}_. + +* Clustered upgrades require special attention to instances and instance groups. Ensure that you capture your inventory and instance group details before upgrading. For more information, see link:{URLControllerAdminGuide}/controller-clustering[Clustering] in _{TitleControllerAdminGuide}_. + +* If you are currently running {EDAcontroller}, disable all rulebook activations before upgrading to ensure that only new activations run after the upgrade process has completed. This prevents orphaned containers from the earlier version from continuing to run activations. For more information, see link:{URLEDAUserGuide}/eda-rulebook-activations#eda-enable-rulebook-activations[Enabling and disabling rulebook activations] in _{TitleEDAUserGuide}_. \ No newline at end of file diff --git a/downstream/modules/platform/con-why-automation-mesh.adoc b/downstream/modules/platform/con-why-automation-mesh.adoc index 4be04db817..d17cf7ff60 100644 --- a/downstream/modules/platform/con-why-automation-mesh.adoc +++ b/downstream/modules/platform/con-why-automation-mesh.adoc @@ -4,16 +4,18 @@ The {AutomationMesh} component of the {PlatformName} simplifies the process of distributing automation across multi-site deployments. For enterprises with multiple isolated IT environments, {AutomationMesh} provides a consistent and reliable way to deploy and scale up automation across your execution nodes using a peer-to-peer mesh communication network. -When upgrading from version 1.x to the latest version of {PlatformNameShort}, you must migrate the data from your legacy isolated nodes into execution nodes necessary for {AutomationMesh}. You can implement {AutomationMesh} by planning out a network of hybrid and control nodes, then editing the inventory file found in the {PlatformNameShort} installer to assign mesh-related values to each of your execution nodes. +//[ddacosta] There is no upgrade/migration path for 2.5EA so removing this until upgrade/migration is possible. +//When upgrading from version 1.x to the latest version of {PlatformNameShort}, you must migrate the data from your legacy isolated nodes into execution nodes necessary for {AutomationMesh}. You can implement {AutomationMesh} by planning out a network of hybrid and control nodes, then editing the inventory file found in the {PlatformNameShort} installer to assign mesh-related values to each of your execution nodes. [role="_additional-resources"] .Additional resources -* For instructions on how to migrate from isolated nodes to execution nodes, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_upgrade_and_migration_guide/index[Red Hat Ansible Automation Platform Upgrade and Migration Guide].
+//[ddacosta] There is no upgrade/migration path for 2.5EA so removing this until upgrade/migration is possible. +//* For instructions on how to migrate from isolated nodes to execution nodes, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_upgrade_and_migration_guide/index[Red Hat Ansible Automation Platform Upgrade and Migration Guide]. * For information about automation mesh and the various ways to design your automation mesh for your environment: -** For a VM-based installation, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_automation_mesh_guide_for_vm-based_installations/index[{PlatformName} {AutomationMesh} guide for VM-based installations]. +** For a VM-based installation, see the link:{LinkAutomationMesh}. -** For an operator-based installation, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_automation_mesh_for_operator-based_installations/index[{PlatformName} {AutomationMesh} for operator-based installations]. +** For an operator-based installation, see the link:{LinkOperatorMesh}. \ No newline at end of file diff --git a/downstream/modules/platform/ini/clustered-nodes.ini b/downstream/modules/platform/ini/clustered-nodes.ini index 738ef3cdab..bd235611ae 100644 --- a/downstream/modules/platform/ini/clustered-nodes.ini +++ b/downstream/modules/platform/ini/clustered-nodes.ini @@ -1,4 +1,4 @@ -[controller] +[automationcontroller] clusternode1.example.com clusternode2.example.com clusternode3.example.com @@ -6,8 +6,7 @@ clusternode3.example.com [all:vars] admin_password='password' -pg_host='' -pg_port='' +pg_host='' pg_database='' pg_username='' diff --git a/downstream/modules/platform/proc-aap-activate-with-credentials.adoc b/downstream/modules/platform/proc-aap-activate-with-credentials.adoc index ca6325214b..e4feb465df 100644 --- a/downstream/modules/platform/proc-aap-activate-with-credentials.adoc +++ b/downstream/modules/platform/proc-aap-activate-with-credentials.adoc @@ -1,13 +1,24 @@ -[id="proc-aap-activate-with-credentials_{context}"] +[id="proc-aap-activate-with-credentials"] = Activate with credentials When {PlatformNameShort} launches for the first time, the {PlatformNameShort} Subscription screen automatically displays. You can use your Red Hat credentials to retrieve and import your subscription directly into {PlatformNameShort}. -.Procedures +[NOTE] +==== +You are opted in for {Analytics} by default when you activate the platform at first login. This helps Red Hat improve the product by delivering you a much better user experience. You can opt out, after activating {PlatformNameShort}, by doing the following: + +. From the navigation panel, select {MenuSetSystem}. +. Clear the *Gather data for {Analytics}* option. +. Click btn:[Save]. +==== + +.Procedure +. Log in to {PlatformName}. +. Select *Username / password*. . Enter your Red Hat username and password. -. Click btn:[Get Subscriptions]. +. Select your subscription from the *Subscription* list. + [NOTE] ==== @@ -15,6 +26,13 @@ You can also use your Satellite username and password if your cluster nodes are ==== + . Review the End User License Agreement and select *I agree to the End User License Agreement*. -. The Tracking and Analytics options are checked by default. These selections help Red Hat improve the product by delivering you a much better user experience. You can opt out by deselecting the options. -. 
Click btn:[Submit]. -. Once your subscription has been accepted, the license screen displays and navigates you to the Dashboard of the {PlatformNameShort} interface. You can return to the license screen by clicking the btn:[Settings] icon *⚙* and selecting the *License* tab from the Settings screen. +. Click btn:[Finish]. + +.Verification +After your subscription has been accepted, subscription details are displayed. A status of _Compliant_ indicates your subscription is in compliance with the number of hosts you have automated within your subscription count. Otherwise, your status will show as _Out of Compliance_, indicating you have exceeded the number of hosts in your subscription. +Other important information displayed includes the following: + +Hosts automated:: Host count automated by the job, which consumes the license count +Hosts imported:: Host count considering all inventory sources (does not impact hosts remaining) +Hosts remaining:: Total host count minus hosts automated + \ No newline at end of file diff --git a/downstream/modules/platform/proc-aap-activate-with-manifest.adoc b/downstream/modules/platform/proc-aap-activate-with-manifest.adoc index 0f6b004635..60d3d79665 100644 --- a/downstream/modules/platform/proc-aap-activate-with-manifest.adoc +++ b/downstream/modules/platform/proc-aap-activate-with-manifest.adoc @@ -1,34 +1,45 @@ -[id="proc-aap-activate-with-manifest_{context}"] +[id="proc-aap-activate-with-manifest"] = Activate with a manifest file -If you have a subscriptions manifest, you can upload the manifest file either using the {PlatformName} interface or manually in an Ansible playbook. +If you have a subscription manifest, you can upload the manifest file by using the {PlatformName} interface. + +[NOTE] +==== +You are opted in for {Analytics} by default when you activate the platform at first login. This helps Red Hat improve the product by delivering you a much better user experience. You can opt out, after activating {PlatformNameShort}, by doing the following: + +. From the navigation panel, select {MenuSetSystem}. +. Clear the *Gather data for {Analytics}* option. +. Click btn:[Save]. +==== .Prerequisites You must have a Red Hat Subscription Manifest file exported from the Red Hat Customer Portal. For more information, see xref:assembly-aap-obtain-manifest-files[Obtaining a manifest file]. -.Uploading with the interface +.Procedure -. Complete steps to generate and download the manifest file . Log in to {PlatformName}. -//[ddacosta] There is no license setting in the test environment for 2.4? Need to verify this selection. In 2.5, I think it will be Settings[Subscription]... -. If you are not immediately prompted for a manifest file, go to menu:Settings[License]. -. Make sure the *Username* and *Password* fields are empty. +. If you are not immediately prompted for a manifest file, go to {MenuSetSubscription}. +. Select *Subscription manifest*. . Click btn:[Browse] and select the manifest file. -. Click btn:[Next]. +. Review the End User License Agreement and select *I agree to the End User License Agreement*. +. Click btn:[Finish]. [NOTE] ==== If the btn:[BROWSE] button is disabled on the License page, clear the *USERNAME* and *PASSWORD* fields. ==== -.Uploading manually +.Verification +After your subscription has been accepted, subscription details are displayed. A status of _Compliant_ indicates your subscription is in compliance with the number of hosts you have automated within your subscription count.
Otherwise, your status will show as _Out of Compliance_, indicating you have exceeded the number of hosts in your subscription. +Other important information displayed includes the following: + +Hosts automated:: Host count automated by the job, which consumes the license count +Hosts imported:: Host count considering all inventory sources (does not impact hosts remaining) +Hosts remaining:: Total host count minus hosts automated -If you are unable to apply or update the subscription info using the {PlatformName} interface, you can upload the subscriptions manifest manually in an Ansible playbook using the `license` module in the `ansible.controller` collection. +[role="_additional-resources"] +.Next steps +* You can return to the license screen by selecting {MenuSetSubscription} from the navigation panel and clicking btn:[Edit subscription]. ------ -- name: Set the license using a file - license: - manifest: "/tmp/my_manifest.zip" ------ diff --git a/downstream/modules/platform/proc-aap-add-merge-subscriptions.adoc b/downstream/modules/platform/proc-aap-add-merge-subscriptions.adoc index aa6c72a2eb..b4f76c49ad 100644 --- a/downstream/modules/platform/proc-aap-add-merge-subscriptions.adoc +++ b/downstream/modules/platform/proc-aap-add-merge-subscriptions.adoc @@ -1,5 +1,5 @@ -[id="proc-add-merge-subscriptions_{context}"] +[id="proc-add-merge-subscriptions"] = Adding subscriptions to a subscription allocation @@ -12,11 +12,6 @@ Once an allocation is created, you can add the subscriptions you need for {Platf . Enter the number of {PlatformNameShort} Entitlement(s) you plan to add. . Click btn:[Submit]. -.Verification -After your subscription has been accepted, subscription details are displayed. A status of _Compliant_ indicates your subscription is in compliance with the number of hosts you have automated within your subscription count. Otherwise, your status will show as _Out of Compliance_, indicating you have exceeded the number of hosts in your subscription. - -Other important information displayed include the following: - -Hosts automated:: Host count automated by the job, which consumes the license count -Hosts imported:: Host count considering all inventory sources (does not impact hosts remaining) -Hosts remaining:: Total host count minus hosts automated +[role="_additional-resources"] +.Next steps +* xref:proc-aap-generate-manifest-file[Download the manifest file] from Red Hat Subscription Management. \ No newline at end of file diff --git a/downstream/modules/platform/proc-aap-controller-backup.adoc b/downstream/modules/platform/proc-aap-controller-backup.adoc index fc3d72f5e8..65db5c92fd 100644 --- a/downstream/modules/platform/proc-aap-controller-backup.adoc +++ b/downstream/modules/platform/proc-aap-controller-backup.adoc @@ -7,22 +7,21 @@ Use this procedure to back up a deployment of the controller, including jobs, in .Prerequisites -* You must be authenticated with an Openshift cluster. -* The {OperatorPlatform} has been installed to the cluster. -* The {ControllerName} is deployed to using the {OperatorPlatform}. +* You must be authenticated with an OpenShift cluster. +* You have installed {OperatorPlatformNameShort} on the cluster. +* You have deployed {ControllerName} using the {OperatorPlatformNameShort}. .Procedure -. Log in to *{OCP}*. +. Log in to {OCP}. . Navigate to menu:Operators[Installed Operators]. -. Select the {OperatorPlatform} installed on your project namespace. +. Select your {OperatorPlatformNameShort} deployment. .
Select the *Automation Controller Backup* tab. . Click btn:[Create AutomationControllerBackup]. . Enter a *Name* for the backup. -. Enter the *Deployment name* of the deployed {PlatformNameShort} instance being backed up. -For example, if your {ControllerName} must be backed up and the deployment name is `aap-controller`, enter 'aap-controller' in the *Deployment name* field. +. In the *Deployment name* field, enter the name of the AutomationController custom resource object of the deployed {PlatformNameShort} instance being backed up. This name was created when you link:{URLOperatorInstallation}/aap-migration#aap-create_controller[created your AutomationController object]. . If you want to use a custom, pre-created pvc: -.. Optionally enter the name of the *Backup persistant volume claim*. -.. Optionally enter the *Backup PVC storage requirements*, and *Backup PVC storage class*. +.. Optional: Enter the name of the *Backup persistent volume claim*. +.. Optional: Enter the *Backup PVC storage requirements* and *Backup PVC storage class*. + [NOTE] ==== @@ -43,14 +42,14 @@ $ df -h | grep "/var/lib/pgsql/data" A backup tarball of the specified deployment is created and available for data recovery or deployment rollback. Future backups are stored in separate tar files on the same pvc. .Verification -. Log in to Red Hat *{OCP}* +. Log in to {OCP}. . Navigate to menu:Operators[Installed Operators]. -. Select the {OperatorPlatform} installed on your project namespace. +. Select your {OperatorPlatformNameShort} deployment. . Select the *AutomationControllerBackup* tab. . Select the backup resource you want to verify. . Scroll to *Conditions* and check that the *Successful* status is `True`. + [NOTE] ==== -If *Successful* is `False`, the backup has failed. Check the {ControllerName} operator logs for the error to fix the issue. +If the status is `Failure`, the backup has failed. Check the {ControllerName} operator logs for the error to fix the issue. ==== diff --git a/downstream/modules/platform/proc-aap-controller-restore.adoc b/downstream/modules/platform/proc-aap-controller-restore.adoc index 00e428b1ef..a6a537f594 100644 --- a/downstream/modules/platform/proc-aap-controller-restore.adoc +++ b/downstream/modules/platform/proc-aap-controller-restore.adoc @@ -7,19 +7,21 @@ Use this procedure to restore a previous controller deployment from an Automatio [NOTE] ==== -The name specified for the new AutomationController custom resource must not match an existing deployment or the recovery process will fail. If the name specified does match an existing deployment, see xref:aap-troubleshoot-backup-recover[Troubleshooting] for steps to resolve the issue. +The name specified for the new AutomationController custom resource must not match an existing deployment. + +If the backup custom resource being restored is a backup of a currently running AutomationController custom resource, the recovery process will fail. See xref:aap-troubleshoot-backup-recover[Troubleshooting] for steps to resolve the issue. ==== .Prerequisites -* You must be authenticated with an Openshift cluster. -* The {ControllerName} has been deployed to the cluster. +* You must be authenticated with an OpenShift cluster. +* You have deployed {ControllerName} on the cluster. * An AutomationControllerBackup is available on a PVC in your cluster. .Procedure -. Log in to *{OCP}*. +. Log in to {OCP}. . Navigate to menu:Operators[Installed Operators]. -. Select the {OperatorPlatform} installed on your project namespace. +.
Select your {OperatorPlatformNameShort} deployment. . Select the *Automation Controller Restore* tab. . Click btn:[Create AutomationControllerRestore]. . Enter a *Name* for the recovery deployment. @@ -27,7 +29,7 @@ The name specified for the new AutomationController custom resource must not mat + [NOTE] ==== -This should be different from the original deployment name. +This must be different from the original deployment name. ==== + . Select the *Backup source to restore from*. *Backup CR* is recommended. @@ -38,9 +40,9 @@ A new deployment is created and your backup is restored to it. This can take app .Verification -. Log in to Red Hat *{OCP}* +. Log in to Red Hat {OCP}. . Navigate to menu:Operators[Installed Operators]. -. Select the {OperatorPlatform} installed on your project namespace. +. Select your {OperatorPlatformNameShort} deployment. . Select the *AutomationControllerRestore* tab. . Select the restore resource you want to verify. . Scroll to *Conditions* and check that the *Successful* status is `True`. diff --git a/downstream/modules/platform/proc-aap-controller-yaml-backup.adoc b/downstream/modules/platform/proc-aap-controller-yaml-backup.adoc new file mode 100644 index 0000000000..9a51ae5a99 --- /dev/null +++ b/downstream/modules/platform/proc-aap-controller-yaml-backup.adoc @@ -0,0 +1,37 @@ +[id="aap-controller-yaml-backup"] + += Using YAML to back up the {ControllerNameStart} deployment + +See the following procedure for how to back up a deployment of the {ControllerName} using YAML. + +.Prerequisites + +* You must be authenticated with an OpenShift cluster. +* You have installed {OperatorPlatformNameShort} on the cluster. +* You have deployed {ControllerName} using the {OperatorPlatformNameShort}. + +.Procedure + +. Create a file named "backup-automation-controller.yml" with the following contents: ++ +---- +--- +apiVersion: automationcontroller.ansible.com/v1beta1 +kind: AutomationControllerBackup +metadata: + name: automationcontrollerbackup-2024-07-15 + namespace: my-namespace +spec: + deployment_name: controller +---- ++ + +[NOTE] +==== +The "deployment_name" above is the name of the {ControllerName} deployment you intend to back up. +The namespace above is the one containing the {ControllerName} deployment you intend to back up. +==== + +. Use the `oc apply` command to create the backup object in your cluster: + +`$ oc apply -f backup-automation-controller.yml` diff --git a/downstream/modules/platform/proc-aap-controller-yaml-restore.adoc b/downstream/modules/platform/proc-aap-controller-yaml-restore.adoc new file mode 100644 index 0000000000..bdc5dcb316 --- /dev/null +++ b/downstream/modules/platform/proc-aap-controller-yaml-restore.adoc @@ -0,0 +1,58 @@ +[id="aap-controller-yaml-restore"] + += Using YAML to recover the {ControllerNameStart} deployment +See the following procedure for how to restore a deployment of the {ControllerName} using YAML. + +.Prerequisite +The external database must be a PostgreSQL database that is the version supported by the current release of {PlatformNameShort}. + +[NOTE] +==== +{PlatformNameShort} {PlatformVers} supports {PostgresVers}. +==== + +.Procedure + +The external PostgreSQL instance credentials and connection information must be stored in a secret, which is then set on the {ControllerName} spec. + +.
Create an `external-postgres-configuration-secret` YAML file, following the template below: ++ +---- +apiVersion: v1 +kind: Secret +metadata: + name: external-restore-postgres-configuration + namespace: <1> +stringData: + host: "" <2> + port: "" <3> + database: "" + username: "" + password: "" <4> + sslmode: "prefer" <5> + type: "unmanaged" +type: Opaque +---- +<1> Namespace to create the secret in. This should be the same namespace you want to deploy to. +<2> The resolvable hostname for your database node. +<3> External port defaults to `5432`. +<4> The value for the `password` variable must not contain single or double quotes (', ") or backslashes (\) to avoid any issues during deployment, backup, or restoration. +<5> The variable `sslmode` is valid for `external` databases only. The allowed values are: `*prefer*`, `*disable*`, `*allow*`, `*require*`, `*verify-ca*`, and `*verify-full*`. +. Apply `external-postgres-configuration-secret.yml` to your cluster using the `oc create` command. ++ +---- +$ oc create -f external-postgres-configuration-secret.yml +---- +. When creating your `AutomationControllerRestore` custom resource object, specify the secret on your spec, following the example below: ++ +---- +kind: AutomationControllerRestore +apiVersion: automationcontroller.ansible.com/v1beta1 +metadata: + namespace: my-namespace + name: automationcontrollerrestore-2024-07-15 +spec: + deployment_name: restored-controller + backup_name: automationcontrollerbackup-2024-07-15 + postgres_configuration_secret: 'external-restore-postgres-configuration' +---- \ No newline at end of file diff --git a/downstream/modules/platform/proc-aap-create-subscription-allocation.adoc b/downstream/modules/platform/proc-aap-create-subscription-allocation.adoc index 6f3798b198..7fd5155ebd 100644 --- a/downstream/modules/platform/proc-aap-create-subscription-allocation.adoc +++ b/downstream/modules/platform/proc-aap-create-subscription-allocation.adoc @@ -8,5 +8,9 @@ Creating a new subscription allocation allows you to set aside subscriptions and .Procedure . From the link:https://access.redhat.com/management/subscription_allocations/[Subscription Allocations] page, click btn:[New Subscription Allocation]. . Enter a name for the allocation so that you can find it later. -. Select *Type: Satellite 6.8* as the management application. +. Select *Type: Satellite {SatelliteVers}* as the management application. . Click btn:[Create]. + +[role="_additional-resources"] +.Next steps +* xref:proc-add-merge-subscriptions[Add the subscriptions] needed for {PlatformNameShort} to run properly. \ No newline at end of file diff --git a/downstream/modules/platform/proc-aap-create_controller.adoc b/downstream/modules/platform/proc-aap-create_controller.adoc index 4ca79bcf78..ed886963ad 100644 --- a/downstream/modules/platform/proc-aap-create_controller.adoc +++ b/downstream/modules/platform/proc-aap-create_controller.adoc @@ -4,16 +4,16 @@ [role=_abstract] -Use the following steps to create an AutomationController custom resource object. +Use the following steps to create an *AutomationController* custom resource object. .Procedure . Log in to *{OCP}*. . Navigate to menu:Operators[Installed Operators]. -. Select the {OperatorPlatform} installed on your project namespace. +. Select the {OperatorPlatformNameShort} installed on your project namespace. . Select the *Automation Controller* tab. -. Click btn:[Create AutomationController]. -. Enter a name for the new deployment. -. In *Advanced configurations*, do the following: -..
From the *Admin Password Secret* list, select your xref:create-secret-key-secret_aap-migration[secret key secret]. -.. From the *Database Configuration Secret* list, select the xref:create-postresql-secret_aap-migration[postgres configuration secret]. -. Click btn:[Create]. +. Click btn:[Create AutomationController]. You can create the object through the *Form view* or *YAML view*. The following inputs are available through the *Form view*. +.. Enter a name for the new deployment. +.. In *Advanced configurations*: +... From the *Secret Key* list, select your xref:create-secret-key-secret_aap-migration[secret key secret]. +... From the *Old Database Configuration Secret* list, select the xref:create-postresql-secret_aap-migration[old postgres configuration secret]. +.. Click btn:[Create]. diff --git a/downstream/modules/platform/proc-aap-create_eda.adoc b/downstream/modules/platform/proc-aap-create_eda.adoc new file mode 100644 index 0000000000..03b6bb5b20 --- /dev/null +++ b/downstream/modules/platform/proc-aap-create_eda.adoc @@ -0,0 +1,19 @@ +[id="aap-create_eda"] + += Creating an EDA object + +[role=_abstract] + +Use the following steps to create an *EDA* custom resource object. + +.Procedure +. Log in to *{OCP}*. +. Navigate to menu:Operators[Installed Operators]. +. Select the {OperatorPlatformNameShort} installed on your project namespace. +. Select the *EDA* tab. +. Click btn:[Create EDA]. You can create the object through the *Form view* or *YAML view*. The following inputs are available through the *Form view*. +.. Enter a name for the new deployment. +.. In *Advanced configurations*: +... From the *Admin Password Secret* list, select your xref:create-secret-key-secret_aap-migration[secret key secret]. +... From the *Database Configuration Secret* list, select the xref:create-postresql-secret_aap-migration[postgres configuration secret]. +.. Click btn:[Create]. diff --git a/downstream/modules/platform/proc-aap-create_hub.adoc b/downstream/modules/platform/proc-aap-create_hub.adoc index 987590e5f1..cb2593b298 100644 --- a/downstream/modules/platform/proc-aap-create_hub.adoc +++ b/downstream/modules/platform/proc-aap-create_hub.adoc @@ -4,14 +4,17 @@ [role=_abstract] -Use the following steps to create an AutomationHub custom resource object. +Use the following steps to create an *AutomationHub* custom resource object. .Procedure . Log in to *{OCP}*. . Navigate to menu:Operators[Installed Operators]. -. Select the {OperatorPlatform} installed on your project namespace. +. Select the {OperatorPlatformNameShort} installed on your project namespace. . Select the *Automation Hub* tab. -. Click btn:[Create AutomationHub]. -. Enter a name for the new deployment. -. In *Advanced configurations*, select your xref:create-secret-key-secret_aap-migration[secret key secret] and xref:create-postresql-secret_aap-migration[postgres configuration secret]. -. Click btn:[Create]. +. Click btn:[Create AutomationHub]. You can create the object through the *Form view* or *YAML view*. +The following inputs are available through the *Form view*. +.. Enter a name for the new deployment. +.. In *Advanced configurations*: +... From the *Admin Password Secret* list, select your xref:create-secret-key-secret_aap-migration[secret key secret]. +... From the *Database Configuration Secret* list, select the xref:create-postresql-secret_aap-migration[postgres configuration secret]. +.. Click btn:[Create].
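+
+For reference, the following is a minimal sketch of what the equivalent *YAML view* input could look like. The `admin_password_secret` and `postgres_configuration_secret` spec field names are assumptions based on the Form view labels, and `hub`, `my-namespace`, and the secret names are placeholders; verify the exact field names in the *YAML view* of your operator version before applying:
+
+----
+apiVersion: automationhub.ansible.com/v1beta1
+kind: AutomationHub
+metadata:
+  name: hub
+  namespace: my-namespace
+spec:
+  # Assumed field names; select the same secrets referenced in the Form view steps above.
+  admin_password_secret: my-admin-password-secret
+  postgres_configuration_secret: my-postgres-configuration-secret
+----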
diff --git a/downstream/modules/platform/proc-aap-generate-manifest-file.adoc b/downstream/modules/platform/proc-aap-generate-manifest-file.adoc index 5d96ca2ab4..cf70850995 100644 --- a/downstream/modules/platform/proc-aap-generate-manifest-file.adoc +++ b/downstream/modules/platform/proc-aap-generate-manifest-file.adoc @@ -1,5 +1,5 @@ -[id="proc-generate-manifest-file_{context}"] +[id="proc-aap-generate-manifest-file"] = Downloading a manifest file @@ -10,8 +10,9 @@ After an allocation is created and has the appropriate subscriptions on it, you . From the link:https://access.redhat.com/management/subscription_allocations/[Subscription Allocations] page, click on the name of the *Subscription Allocation* to which you would like to generate a manifest. . Click the *Subscriptions* tab. . Click btn:[Export Manifest] to download the manifest file. ++ +This downloads a file _manifest__.zip_ to your default downloads folder. -[NOTE] -==== -The file is saved to your default downloads folder and can now be uploaded to xref:proc-aap-activate-with-manifest_activate-aap[activate {PlatformName}]. -==== +[role="_additional-resources"] +.Next steps +* xref:proc-aap-activate-with-manifest[Upload the manifest file] to activate {PlatformName}. diff --git a/downstream/modules/platform/proc-aap-hub-backup.adoc b/downstream/modules/platform/proc-aap-hub-backup.adoc index 9f2b8a11a8..f849ff8ba2 100644 --- a/downstream/modules/platform/proc-aap-hub-backup.adoc +++ b/downstream/modules/platform/proc-aap-hub-backup.adoc @@ -7,14 +7,14 @@ Use this procedure to back up a deployment of the hub, including all hosted Ansi .Prerequisites -* You must be authenticated with an Openshift cluster. -* The {OperatorPlatform} has been installed to the cluster. -* The {HubName} is deployed to using the {OperatorPlatform}. +* You must be authenticated with an OpenShift cluster. +* You have installed {OperatorPlatformNameShort} on the cluster. +* You have deployed {HubName} using the {OperatorPlatformNameShort}. .Procedure -. Log in to *{OCP}*. +. Log in to {OCP}. . Navigate to menu:Operators[Installed Operators]. -. Select the {OperatorPlatform} installed on your project namespace. +. Select your {OperatorPlatformNameShort} deployment. . Select the *Automation Hub Backup* tab. . Click btn:[Create AutomationHubBackup]. . Enter a *Name* for the backup. @@ -24,4 +24,4 @@ For example, if your {HubName} must be backed up and the deployment name is `aap .. Optionally, enter the name of the *Backup persistent volume claim*, *Backup persistent volume claim namespace*, *Backup PVC storage requirements*, and *Backup PVC storage class*. . Click btn:[Create]. + -A backup of the specified deployment is created and available for data recovery or deployment rollback. +This creates a backup of the specified deployment, which is available for data recovery or deployment rollback. diff --git a/downstream/modules/platform/proc-aap-hub-restore.adoc b/downstream/modules/platform/proc-aap-hub-restore.adoc index 7ae89f9475..7313dd809c 100644 --- a/downstream/modules/platform/proc-aap-hub-restore.adoc +++ b/downstream/modules/platform/proc-aap-hub-restore.adoc @@ -12,14 +12,14 @@ The name specified for the new AutomationHub custom resource must not match an e .Prerequisites -* You must be authenticated with an Openshift cluster. -* The {HubName} has been deployed to the cluster. +* You must be authenticated with an OpenShift cluster. +* You have deployed {HubName} on the cluster. * An AutomationHubBackup is available on a PVC in your cluster.
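+
+Before creating the restore, you can confirm that the last prerequisite is met. A minimal sketch, assuming your backup was created in the `my-namespace` namespace (a placeholder):
+
+----
+$ oc get automationhubbackup -n my-namespace
+----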
.Procedure -. Log in to *{OCP}*. +. Log in to {OCP}. . Navigate to menu:Operators[Installed Operators]. -. Select the {OperatorPlatform} installed on your project namespace. +. Select your {OperatorPlatformNameShort} deployment. . Select the *Automation Hub Restore* tab. . Click btn:[Create AutomationHubRestore]. . Enter a *Name* for the recovery deployment. @@ -27,4 +27,4 @@ The name specified for the new AutomationHub custom resource must not match an e . Enter the *Backup Name* of the AutomationHubBackup object. . Click btn:[Create]. + -A new deployment is created and your backup is restored to it. +This creates a new deployment and restores your backup to it. diff --git a/downstream/modules/platform/proc-aap-migrate-SAML-users.adoc b/downstream/modules/platform/proc-aap-migrate-SAML-users.adoc new file mode 100644 index 0000000000..84541a69cb --- /dev/null +++ b/downstream/modules/platform/proc-aap-migrate-SAML-users.adoc @@ -0,0 +1,41 @@ + + +[id="proc-migrate-SAML-users_{context}"] + += Migrating Single Sign-On (SSO) users + +[role="_abstract"] + +When upgrading from {PlatformNameShort} 2.4 to 2.5, you must migrate your Single Sign-On (SSO) user accounts if you want to continue using SSO capabilities after the upgrade. Follow the steps in this procedure to ensure a smooth SSO user migration. + +== Key considerations + +*SSO configurations are not migrated automatically during upgrade to 2.5:* While the legacy authentication settings are carried over during the upgrade process and allow seamless initial access to the platform after upgrade, SSO configurations must be manually migrated over to a new {PlatformNameShort} 2.5 authentication configuration. The legacy configuration acts as a reference to preserve existing authentication capabilities and facilitate the migration process. The legacy authentication configuration should not be modified directly or used after migration is complete. + +*SSO migration is not currently supported in the UI:* While migration of SSO accounts is supported in 2.5, the configuration is not supported through the platform UI and must be done through the API `/api/gateway/v1/authenticators/`. + +*Migration of SSO must happen before users log in and start account linking:* You must enable the *Auto migrate users to* setting _after_ configuring SSO in 2.5 and _before_ any users log in. + +.Prerequisites + +You have configured an SSO authentication method in the {Gateway} following the steps in link:{URLCentralAuth}/gw-configure-authentication#gw-config-authentication-type[Configuring an authentication type]. This will be the configuration that you will migrate your previous SSO users to. + +[NOTE] +==== +{PlatformNameShort} 2.4 SSO configurations are renamed during the upgrade process and are displayed in the *Authentication Methods* list view with a prefix to indicate a legacy configuration: for example, `legacy_sso-saml-`. The *Authentication type* is also listed as *legacy sso*. These configurations cannot be modified. +==== + +.Procedure + +. Log in to the {Gateway} API. +. Go to `/api/gateway/v1/authenticators/`, locate the legacy authenticator, and click the link. This opens the HTML form for that authenticator. +. Select the new {Gateway} authenticator from the *Auto migrate users to* list. +. Click btn:[PUT]. + +After you set up the auto migrate functionality, you can log in with SSO in the {Gateway}, and any matching accounts from the legacy SSO authenticator are automatically linked.
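+
+If you prefer to script this step rather than use the HTML form, the following is a minimal `curl` sketch of the same update. It assumes the legacy authenticator has ID `1`, the new authenticator has ID `2`, and that the *Auto migrate users to* setting maps to a request field named `auto_migrate_users_to`; `<gateway-host>` and `<password>` are placeholders. Verify the exact field name in the browsable API form for your authenticator before running it:
+
+----
+$ curl -k -u admin:<password> \
+    -X PATCH \
+    -H "Content-Type: application/json" \
+    -d '{"auto_migrate_users_to": 2}' \
+    https://<gateway-host>/api/gateway/v1/authenticators/1/
+----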
+ +[role="_additional-resources"] +.Additional resources + +Refer to link:https://interact.redhat.com/share/baxthgXBQZ3kSRKPLn5L[{PlatformNameShort} 2.4 to 2.5. Linking accounts post upgrade, and Setting up SAML authentication] for a demonstration of the post upgrade steps. diff --git a/downstream/modules/platform/proc-aap-migrate-admin-users.adoc b/downstream/modules/platform/proc-aap-migrate-admin-users.adoc new file mode 100644 index 0000000000..93d98e434d --- /dev/null +++ b/downstream/modules/platform/proc-aap-migrate-admin-users.adoc @@ -0,0 +1,39 @@ +[id="aap-migrate-admin-users_{context}"] + += Migrating admin users + +[role="_abstract"] +Upgrading from {PlatformNameShort} 2.4 to 2.5 allows for the migration of administrators for each component with their existing component-level admin privileges maintained. However, escalation of privileges to {Gateway} administrator is not automatic during the upgrade process. This ensures a secure privilege escalation process that can be customized to meet the organization's specific needs. + +.Prerequisites + +* Review current admin roles for the individual services in your current deployment. +* Confirm the users who will require {Gateway} admin rights post-upgrade. + + +== Key considerations + +*Component-level admin privileges are retained:* Administrators for {ControllerName} and {HubName} will retain their existing admin privileges for those respective services post-upgrade. For example, an admin of {ControllerName} will continue to have full administration privileges for {ControllerName} resources. + +[NOTE] +==== +Users previously designated as {ControllerName} or {HubName} administrators are labeled as *Normal* in the *User type* column of the Users list view. This is a mischaracterization. You can verify that these users have, in fact, retained their service level administrator privileges by editing the account: +==== + + +.Procedure + +. From the navigation panel of the {Gateway}, select {MenuAMUsers}. +. Select the check box for the user that you want to modify. +. Click the Pencil icon and select *Edit user*. +. The Edit user page is displayed, where you can see the service level administrator privileges assigned by the *User type* checkboxes. See link:{URLCentralAuth}/gw-managing-access#gw-editing-a-user[Editing a user] for more information on these user types. + +[NOTE] +==== +Only a platform administrator can escalate your privileges. +==== + +*Escalation to {Gateway} admin must be manually configured post-upgrade:* During the upgrade process, admin privileges for individual services are not automatically translated to platform administrator privileges. Escalation to {Gateway} admin must be granted by the platform administrator after upgrade and migration. Each service admin retains the original scope of their access until the access is changed. + +As a platform administrator, you can escalate a user's privileges by selecting the *{PlatformNameShort} Administrator* checkbox.
+ diff --git a/downstream/modules/platform/proc-aap-migration-backup.adoc b/downstream/modules/platform/proc-aap-migration-backup.adoc index 19fba83f60..5a4ed163fe 100644 --- a/downstream/modules/platform/proc-aap-migration-backup.adoc +++ b/downstream/modules/platform/proc-aap-migration-backup.adoc @@ -1,11 +1,11 @@ [id="aap-migration-backup"] [role="_abstract"] -= Migrating to Ansible Automation Platform Operator += Migrating to {OperatorPlatformNameShort} .Prerequisites -To migrate {PlatformNameShort} deployment to {OperatorPlatform}, you must have the following: +To migrate your {PlatformNameShort} deployment to {OperatorPlatformNameShort}, you must have the following: * Secret key secret * Postgresql configuration @@ -18,21 +18,9 @@ You can store the secret key information in the inventory file before the initia If you are unable to remember your secret key or have trouble locating your inventory file, contact link:https://access.redhat.com/[Ansible support] through the Red Hat Customer portal. ==== -Before migrating your data from {PlatformNameShort} 2.x or earlier, you must back up your data for loss prevention. To backup your data, do the following: +Before migrating your data from {PlatformNameShort} 2.4, you must back up your data for loss prevention. .Procedure + . Log in to your current deployment project. -. Run `setup.sh` to create a backup of your current data or deployment: -+ -For on-prem deployments of version 2.x or earlier: -+ ------ -$ ./setup.sh -b ------ -+ -For OpenShift deployments before version 2.0 (non-operator deployments): -+ ------ -./setup_openshift.sh -b ------ -//reminder - add a cross reference statement to new Backup and Restore doc once published. "For Openshift Operator installations for version 2.0 and later, refer to" +. Run `./setup.sh -b` to create a backup of your current data or deployment. \ No newline at end of file diff --git a/downstream/modules/platform/proc-aap-migration.adoc b/downstream/modules/platform/proc-aap-migration.adoc index 37b64d6cc4..c92db2ab4e 100644 --- a/downstream/modules/platform/proc-aap-migration.adoc +++ b/downstream/modules/platform/proc-aap-migration.adoc @@ -4,4 +4,4 @@ [role=_abstract] -After you have set your secret key, postgresql credentials, verified network connectivity and installed the {OperatorPlatform}, you must create a custom resource controller object before you can migrate your data. +After you have set your secret key, postgresql credentials, verified network connectivity, and installed the {OperatorPlatformNameShort}, you must create a custom resource controller object before you can migrate your data. diff --git a/downstream/modules/platform/proc-aap-platform-gateway-backup.adoc b/downstream/modules/platform/proc-aap-platform-gateway-backup.adoc index 6274e4e6e7..5d58c8d8d2 100644 --- a/downstream/modules/platform/proc-aap-platform-gateway-backup.adoc +++ b/downstream/modules/platform/proc-aap-platform-gateway-backup.adoc @@ -1,3 +1,56 @@ [id="aap-platform-gateway-backup_{context}"] -= Backing up your AnsibleAutomationPlatform resource += Backing up your {PlatformNameShort} deployment +Regularly backing up your *{PlatformNameShort}* deployment is vital to protect against unexpected data loss and application errors. *{PlatformNameShort}* hosts any enabled components (such as {ControllerName}, {HubName}, and {EDAName}). When you back up *{PlatformNameShort}*, the operator also backs up these components. + +.Prerequisites +* You must be authenticated with an OpenShift cluster.
+* You have installed {OperatorPlatformNameShort} on the cluster. +* You have deployed a *{PlatformNameShort}* instance using the {OperatorPlatformNameShort}. + +.Procedure +. Log in to {OCP}. +. Navigate to menu:Operators[Installed Operators]. +. Select your {OperatorPlatformNameShort} deployment. +. Go to your *All Instances* tab, and click btn:[Create New]. +. Select *{PlatformNameShort} Backup* from the list. ++ +[NOTE] +==== +Creating the *{PlatformNameShort} Backup* resource also creates backup resources for each of the nested components that are enabled. +==== ++ +. In the *Name* field, enter a name for the backup. +. In the *Deployment name* field, enter the name of the deployed {PlatformNameShort} instance being backed up. For example, if your {PlatformNameShort} deployment must be backed up and the deployment name is aap, enter 'aap' in the *Deployment name* field. +. Click btn:[Create]. + +This results in an *AnsibleAutomationPlatformBackup* resource. The resource YAML is similar to the following: + +---- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatformBackup +metadata: + name: backup + namespace: aap +spec: + no_log: true + deployment_name: aap +---- + +.Verification +To verify that your backup was successful, do the following: + +. Log in to {OCP}. +. Navigate to menu:Operators[Installed Operators]. +. Select your {OperatorPlatformNameShort} deployment. +. Click *All Instances*. + +The *All Instances* page displays the main backup and the backups for each component with the name you specified when creating your backup resource. +The status for the following instances must be either *Running* or *Successful*: + +* AnsibleAutomationPlatformBackup +* AutomationControllerBackup +* EDABackup +* AutomationHubBackup + + diff --git a/downstream/modules/platform/proc-aap-platform-gateway-restore.adoc b/downstream/modules/platform/proc-aap-platform-gateway-restore.adoc index 8f0d5b9ce8..3c0477773b 100644 --- a/downstream/modules/platform/proc-aap-platform-gateway-restore.adoc +++ b/downstream/modules/platform/proc-aap-platform-gateway-restore.adoc @@ -1,3 +1,41 @@ -[id="aap-platform-gateway-restore"] +[id="aap-platform-gateway-restore_{context}"] -= Recovering your AnsibleAutomationPlatform resource += Recovering your {PlatformNameShort} deployment +*{PlatformNameShort}* manages any enabled components (such as {ControllerName}, {HubName}, and {EDAName}). When you recover *{PlatformNameShort}*, you also restore these components. + +In previous versions of the {OperatorPlatformNameShort}, it was necessary to create a restore object for each component of the platform. +Now, you create a single *AnsibleAutomationPlatformRestore* resource, which creates and manages the other restore objects: + +* AutomationControllerRestore +* AutomationHubRestore +* EDARestore + +.Prerequisites +* You must be authenticated with an OpenShift cluster. +* You have installed the {OperatorPlatformNameShort} on the cluster. +* The *AnsibleAutomationPlatformBackups* deployment is available in your cluster. + +.Procedure +. Log in to {OCP}. +. Navigate to menu:Operators[Installed Operators]. +. Select your {OperatorPlatformNameShort} deployment. +. Go to your *All Instances* tab, and click btn:[Create New]. +. Select *{PlatformNameShort} Restore* from the list. +. For *Name*, enter the name for the recovery deployment. +. For *New {PlatformNameShort} Name*, enter the new name for your {PlatformNameShort} instance. +. *Backup Source* defaults to *CR*. +.
For *Backup name*, enter the name you chose when creating the backup. +. Click btn:[Create]. + +Your backup starts restoring under the *AnsibleAutomationPlatformRestores* tab. + +[NOTE] +==== +The recovery is not complete until all the resources are successfully restored. Depending on the size of your database, this can take some time. +==== + +.Verification +To verify that your recovery was successful, do the following: + +. Go to menu:Workloads[Pods]. +. Confirm that all pods are in a *Running* or *Completed* state. diff --git a/downstream/modules/platform/proc-access-hub-operator-ui.adoc b/downstream/modules/platform/proc-access-hub-operator-ui.adoc index 351fb0fd6a..8593a2792c 100644 --- a/downstream/modules/platform/proc-access-hub-operator-ui.adoc +++ b/downstream/modules/platform/proc-access-hub-operator-ui.adoc @@ -1,10 +1,11 @@ [id="proc-access-hub-operator-ui_{context}"] -= Accessing the {HubName} user interface += Finding the {HubName} route -You can access the {HubName} interface once all pods have successfully launched. +You can access the {HubName} through the {Gateway} or through the following procedure. .Procedure +. Log in to {OCP}. . Navigate to menu:Networking[Routes]. . Under *Location*, click on the URL for your {HubName} instance. diff --git a/downstream/modules/platform/proc-account-linking.adoc b/downstream/modules/platform/proc-account-linking.adoc new file mode 100644 index 0000000000..010384d2b5 --- /dev/null +++ b/downstream/modules/platform/proc-account-linking.adoc @@ -0,0 +1,52 @@ +:_newdoc-version: 2.18.3 +:_template-generated: 2024-10-08 +:_mod-docs-content-type: PROCEDURE + +[id="account-linking_{context}"] += Linking your account + +{PlatformNameShort} 2.5 provides a centralized location for users, teams, and organizations to access the platform's services and features. +//[ddacosta] Moved this statement to the assembly intro +//When you upgrade from a previous version of {PlatformNameShort}, your existing account is automatically migrated to a single platform account. However, if you have multiple component accounts (such as, {ControllerName}, {HubName}, and {EDAName}), your accounts must be linked to use the centralized features of the platform. + +The first time you log in to {PlatformNameShort} 2.5, the platform searches through the existing services to locate a user account with the credentials you entered. When there is a match to an existing account, that account is registered and becomes centrally managed by the platform. Any subsequent component accounts in the system are orphaned and cannot be used to log in to the platform. + +To address this problem, use the account linking procedure to authenticate from any of your existing component accounts and still be recognized by the platform. Linking accounts associates existing component accounts with the same user profile. + +.Prerequisites + +* You have completed the upgrade process and have a legacy {PlatformNameShort} account and credentials. + +.Procedure +If you have completed the upgrade process and have a legacy {PlatformNameShort} subscription, follow the account linking procedure below to migrate your account to {PlatformNameShort} 2.5. + +. Navigate to the login page for {PlatformNameShort}. +. In the login modal, select either *I have an {ControllerName} account* or *I have an {HubName} account* based on the credentials you have. +. On the next screen, enter the legacy credentials for the component account you selected and click btn:[Log in].
+ +[NOTE] +==== +If you are logging in using OIDC credentials, see link:https://access.redhat.com/solutions/7092980[How to fix broken OIDC redirect after upgrading to AAP 2.5]. +==== ++ +. If you have successfully linked your account, the next screen shows your username with a green checkmark beside it. If you have other legacy accounts that you want to link, enter those account credentials and click btn:[Link] to link them to your centralized {Gateway} account. +. Click btn:[Submit] to complete linking your legacy accounts. +. After your accounts are linked, depending on your authentication method, you might be prompted to create a new username and password. These credentials will replace your legacy credentials for each component account. + +You can also link your legacy account manually by taking the following steps: + +. Select your user icon at the top right of your screen, and select *User details*. +. Select the btn:[More Actions] icon *{MoreActionsIcon}* > *Link user accounts*. +. Enter the credentials for the account that you want to link. + +If you encounter an error message telling you that your account could not be authenticated, contact your platform administrator. + +[NOTE] +==== +If you log in to {PlatformNameShort} for the first time and are prompted to change your username, this is an indication that another user has already logged in to {PlatformNameShort} with the same username. To proceed with account migration, follow the prompts to change your username. {PlatformNameShort} uses your password to authenticate which account or accounts belong to you. +==== + +.A diagram of the account linking flow +image:account-linking-flow.png[Account linking flow] + +After you have migrated your user account, you can manage your account from the *Access Management* menu. See link:{URLCentralAuth}/gw-managing-access[Managing access with role based access control]. diff --git a/downstream/modules/platform/proc-add-controller-access-token.adoc b/downstream/modules/platform/proc-add-controller-access-token.adoc index 6c11a89fc3..6910fd3dcb 100644 --- a/downstream/modules/platform/proc-add-controller-access-token.adoc +++ b/downstream/modules/platform/proc-add-controller-access-token.adoc @@ -1,24 +1,21 @@ [id="proc-add-controller-access-token_{context}"] -= Connecting {OperatorResourceShort} to {ControllerName} += Connecting {OperatorResourceShort} to {Gateway} -To connect {OperatorResourceShort} with {ControllerName} you need to create a k8s secret with the connection information for your {ControllerName} instance. +To connect {OperatorResourceShort} with {Gateway}, you must create a k8s secret with the connection information for your {ControllerName} instance. + +NOTE: You can only create OAuth 2 Tokens for your own user through the API or UI, which means you can only configure or view tokens from your own user profile. .Procedure -To create an OAuth2 token for your user in the {ControllerName} UI: +To create an OAuth2 token for your user in the {Gateway} UI: -. In the navigation panel, select menu:Access[Users]. +. Log in to {PlatformNameShort}. +. In the navigation panel, select menu:Access Management[Users]. . Select the username you want to create a token for. -. Click on btn:[Tokens], then click btn:[Add]. +. Select menu:Tokens[Automation Execution]. +. Click btn:[Create Token]. . You can leave *Applications* empty. Add a description and select *Read* or *Write* for the *Scope*.
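+
+After you save the token, you can store the connection details in the k8s secret mentioned above. A minimal sketch, assuming {OperatorResourceShort} reads the connection information from `host` and `token` keys, and that `aap-access`, `my-namespace`, `<gateway-host>`, and `<oauth2-token>` are placeholders for your own values:
+
+----
+$ oc create secret generic aap-access \
+    --namespace my-namespace \
+    --from-literal=host=https://<gateway-host> \
+    --from-literal=token=<oauth2-token>
+----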
-Alternatively, you can create a OAuth2 token at the command-line by using the `create_oauth2_token` manage command: - ----- -$ controller-manage create_oauth2_token --user example_user -New OAuth2 token for example_user: j89ia8OO79te6IAZ97L7E8bMgXCON2 ----- - [NOTE] ==== Make sure you provide a valid user when creating tokens. diff --git a/downstream/modules/platform/proc-add-eda-safe-plugin-var.adoc b/downstream/modules/platform/proc-add-eda-safe-plugin-var.adoc new file mode 100644 index 0000000000..5a17498aee --- /dev/null +++ b/downstream/modules/platform/proc-add-eda-safe-plugin-var.adoc @@ -0,0 +1,46 @@ + +[id="proc-add-eda-safe-plugin-var"] + += Adding a safe plugin variable to {EDAcontroller} + +When using `redhat.insights_eda` or similar plugins to run rulebook activations in {EDAcontroller}, you must add a safe plugin variable to a directory in {PlatformNameShort}. This ensures the connection between {EDAcontroller} and the source plugin, and ensures that port mappings display correctly. + +// Procedure for RPM installer +ifdef::aap-install[] +.Procedure + +. Create a directory for the safe plugin variable: `mkdir -p ./group_vars/automationedacontroller` +. Create a file within that directory for your new setting (for example, `touch ./group_vars/automationedacontroller/custom.yml`). +. Add the variable `automationedacontroller_additional_settings` to extend the default `settings.yaml` template for {EDAcontroller} and add the `SAFE_PLUGINS` field with a list of plugins to enable. For example: ++ +---- +automationedacontroller_additional_settings: + SAFE_PLUGINS: + - ansible.eda.webhook + - ansible.eda.alertmanager +---- ++ +[NOTE] +==== +You can also extend the `automationedacontroller_additional_settings` variable beyond `SAFE_PLUGINS` in the Django configuration file `/etc/ansible-automation-platform/eda/settings.yaml`. +==== +endif::aap-install[] + + +// Procedure for Containerized installer +ifdef::container-install[] +.Procedure + +. Create a directory for the safe plugin variable: ++ +---- +mkdir -p ./group_vars/automationeda +---- ++ +. Create a file within that directory for your new setting (for example, `touch ./group_vars/automationeda/custom.yml`). +. Add the variable `eda_safe_plugins` with a list of plugins to enable. For example: ++ +---- +eda_safe_plugins: ['ansible.eda.webhook', 'ansible.eda.alertmanager'] +---- +endif::container-install[] diff --git a/downstream/modules/platform/proc-add-operator-execution-nodes.adoc b/downstream/modules/platform/proc-add-operator-execution-nodes.adoc index 0c649ec0ed..007f39d0e8 100644 --- a/downstream/modules/platform/proc-add-operator-execution-nodes.adoc +++ b/downstream/modules/platform/proc-add-operator-execution-nodes.adoc @@ -1,15 +1,15 @@ [id="add-operator-execution-nodes_{context}"] .Prerequisites -* An {ControllerName} instance -* The receptor collection package is installed -* The `ansible-runner` package is installed +* An {ControllerName} instance. +* The receptor collection package is installed. +* The {PlatformNameShort} repository `ansible-automation-platform-{PlatformVers}-for-rhel-{RHEL-RELEASE-NUMBER}-x86_64-rpms` is enabled. .Procedure . Log in to {PlatformName}. . In the navigation panel, select {MenuInfrastructureInstances}. . Click btn:[Add]. -. Input the VM name in the *Host Name* field. +. Enter the execution node domain name or IP address in the *Host Name* field. . Optional: Input the port number in the *Listener Port* field. . Click btn:[Save]. .
Click the download icon image:download.png[download,15,15]next to *Install Bundle*. This starts a download, take note of where you save the file @@ -17,8 +17,8 @@ + [NOTE] ==== -To run the `install_receptor.yml` playbook you need to install the receptor collection from {Galaxy}: -`Ansible-galaxy collection install -r requirements.txt` +To run the `install_receptor.yml` playbook, you must install the receptor collection from {Galaxy}: +`ansible-galaxy collection install -r requirements.yml` ==== . Update the playbook with your user name and SSH private key file. Note that `ansible_host` pre-populates with the hostname you input earlier. + @@ -26,7 +26,7 @@ To run the `install_receptor.yml` playbook you need to install the receptor col all: hosts: remote-execution: - ansible_host: example_host_name + ansible_host: example_host_name # Same as the host name configured in the AAP web UI ansible_user: #user provided Ansible_ssh_private_key_file: ~/.ssh/id_example ---- @@ -34,15 +34,21 @@ all: . To install the bundle run: + ---- -ansible-playbook install_receptor.yml -i inventory +ansible-playbook install_receptor.yml -i inventory.yml ---- . When installed you can now upgrade your execution node by downloading and re-running the playbook for the instance you created. .Verification +To verify the receptor service status, run the following command: +---- +sudo systemctl status receptor.service +---- +Make sure the service is in the `active (running)` state. + To verify if your playbook runs correctly on your new node run the following command: ---- watch podman ps ---- .Additional resources -* For more information about managing instance groups see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-instance-groups[Managing Instance Groups] section of the Automation Controller User Guide. +* For more information about managing instance groups see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/using_automation_execution/index#controller-instance-groups[Managing Instance Groups] section of the Automation Controller User Guide. diff --git a/downstream/modules/platform/proc-attaching-subscriptions.adoc b/downstream/modules/platform/proc-attaching-subscriptions.adoc new file mode 100644 index 0000000000..488f21995b --- /dev/null +++ b/downstream/modules/platform/proc-attaching-subscriptions.adoc @@ -0,0 +1,88 @@ +// emurtoug removed this assembly from the Planning guide to avoid duplication of subscription content added to Access management and authentication + +[id="proc-attaching-subscriptions"] + += Attaching your {PlatformName} subscription + +[role="_abstract"] +You *must* have valid subscriptions attached on all nodes before installing {PlatformName}. Attaching your {PlatformNameShort} subscription provides access to subscription-only resources necessary to proceed with the installation. + +//[ddacosta] Removing this note until it can be verified that SCA is available with AAP +// [NOTE] +// ==== +// Attaching a subscription is unnecessary if you have enabled Simple Content Access Mode on your Red Hat account. Once enabled, you will need to register your systems to either Red Hat Subscription Management (RHSM) or Satellite before installing the {PlatformNameShort}. For more information, see link:https://access.redhat.com/articles/simple-content-access[Simple Content Access]. +// ==== + +.Procedure + +.
Make sure your system is registered: ++ +----- +$ sudo subscription-manager register --username <$INSERT_USERNAME_HERE> --password <$INSERT_PASSWORD_HERE> +----- ++ +. Obtain the `pool_id` for your {PlatformName} subscription: ++ +----- +$ sudo subscription-manager list --available --all | grep "Ansible Automation Platform" -B 3 -A 6 +----- ++ +[NOTE] +==== +Do not use MCT4022 as a `pool_id` for your subscription because it can cause {PlatformNameShort} subscription attachment to fail. +==== ++ +.Example +An example output of the `*subscription-manager list*` command. Obtain the `pool_id` as seen in the `Pool ID:` section: ++ +----- +Subscription Name: Red Hat Ansible Automation, Premium (5000 Managed Nodes) + Provides: Red Hat Ansible Engine + Red Hat Ansible Automation Platform + SKU: MCT3695 + Contract: ```` + Pool ID: + Provides Management: No + Available: 4999 + Suggested: 1 +----- ++ +. Attach the subscription: ++ +----- +$ sudo subscription-manager attach --pool= +----- ++ +You have now attached your {PlatformName} subscriptions to all nodes. ++ +. To remove this subscription, enter the following command: ++ +----- +$ sudo subscription-manager remove --pool= +----- + +.Verification + +* Verify the subscription was successfully attached: + +----- +$ sudo subscription-manager list --consumed +----- + +.Troubleshooting + +* If you are unable to locate certain packages that came bundled with the {PlatformNameShort} installer, or if you are seeing a `_Repositories disabled by configuration_` message, try enabling the repository by using the command: ++ +{PlatformName} {PlatformVers} for RHEL 8 ++ +[literal, options="nowrap" subs="+attributes"] +---- +$ sudo subscription-manager repos --enable ansible-automation-platform-{PlatformVers}-for-rhel-8-x86_64-rpms +---- ++ +{PlatformName} {PlatformVers} for RHEL 9 ++ +[literal, options="nowrap" subs="+attributes"] +---- +$ sudo subscription-manager repos --enable ansible-automation-platform-{PlatformVers}-for-rhel-9-x86_64-rpms +---- diff --git a/downstream/modules/platform/proc-backup-aap-container.adoc b/downstream/modules/platform/proc-backup-aap-container.adoc new file mode 100644 index 0000000000..d8aabe1e47 --- /dev/null +++ b/downstream/modules/platform/proc-backup-aap-container.adoc @@ -0,0 +1,23 @@ +[id="proc-backup-aap-container"] + += Backing up container-based {PlatformNameShort} + +Perform a backup of your {ContainerBase} of {PlatformNameShort}. + +.Procedure + +. Go to the {PlatformName} installation directory on your {RHEL} host. + +. Run the `backup` playbook: ++ +---- +$ ansible-playbook -i <path_to_inventory> ansible.containerized_installer.backup +---- + +This backs up the important data deployed by the containerized installer, such as: + +* PostgreSQL databases +* Configuration files +* Data files + +By default, the backup directory is set to `~/backups`. You can change this by using the `backup_dir` variable in your `inventory` file. \ No newline at end of file diff --git a/downstream/modules/platform/proc-backup-aap-rpm.adoc b/downstream/modules/platform/proc-backup-aap-rpm.adoc new file mode 100644 index 0000000000..54532340b5 --- /dev/null +++ b/downstream/modules/platform/proc-backup-aap-rpm.adoc @@ -0,0 +1,23 @@ +[id="proc-backup-aap-rpm"] + += Backing up RPM-based {PlatformNameShort} + +Back up an existing {PlatformNameShort} instance by running the `setup.sh` script with the `backup_dir` flag, which saves the content and configuration of your current environment: + +. Go to your {PlatformNameShort} installation directory. + +.
Run the `setup.sh` script following the example below: ++ +---- +$ ./setup.sh -e 'backup_dir=/ansible/mybackup' -e 'use_compression=True' @credentials.yml -b +---- ++ +* `backup_dir` specifies a directory to save your backup to. ++ +* `@credentials.yml` passes the password variables and their values that are encrypted by `ansible-vault`. + +With a successful backup, a backup file is created at `/ansible/mybackup/.tar.gz`. + +*Additional resources* + +* For more information about backing up and restoring, see link:{URLControllerAdminGuide}/controller-backup-and-restore[Backup and restore] in _{TitleControllerAdminGuide}_. diff --git a/downstream/modules/platform/proc-benchmark-postgresql.adoc b/downstream/modules/platform/proc-benchmark-postgresql.adoc index facfe94a8e..803320d2b9 100644 --- a/downstream/modules/platform/proc-benchmark-postgresql.adoc +++ b/downstream/modules/platform/proc-benchmark-postgresql.adoc @@ -5,7 +5,7 @@ Check whether the minimum {PlatformNameShort} PostgreSQL database requirements are met by using the Flexible I/O Tester (FIO) tool. FIO is a tool used to benchmark read and write IOPS performance of the storage system. .Prerequisites - * You have installed the Flexible I/O Tester (`fio`) storage performance benchmarking tool. +* You have installed the Flexible I/O Tester (`fio`) storage performance benchmarking tool. + To install `fio`, run the following command as the root user: + @@ -64,5 +64,8 @@ read_iops: (g=0): rw=randread, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-40 […] ---- + -You must review, monitor, and revisit the log files according to your own business requirements, application workloads, and new demands. +[NOTE] +==== +The above is a baseline to help evaluate best-case performance on your systems. Systems change over time, and performance can vary depending on what else is happening on your systems, storage, or network at the time of testing. You must review, monitor, and revisit the log files according to your own business requirements, application workloads, and new demands. +==== + diff --git a/downstream/modules/platform/proc-change-ssl-installer.adoc b/downstream/modules/platform/proc-change-ssl-installer.adoc index 4945182417..928f27b3bd 100644 --- a/downstream/modules/platform/proc-change-ssl-installer.adoc +++ b/downstream/modules/platform/proc-change-ssl-installer.adoc @@ -9,10 +9,7 @@ The following procedure describes how to change the SSL certificate and key in t . Copy the new SSL certificates and keys to a path relative to the {PlatformNameShort} installer. . Add the absolute paths of the SSL certificates and keys to the inventory file. -Refer to the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/index#ref-hub-variables[{ControllerNameStart} variables], -link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/index#ref-controller-variables[{HubNameStart} variables], and link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/appendix-inventory-files-vars#event-driven-ansible-controller[{EDAcontroller} variables] sections of the -link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/index[{PlatformName} Installation Guide] -for guidance on setting these variables.
+Refer to the link:{URLInstallationGuide}/appendix-inventory-files-vars#ref-controller-variables[{ControllerNameStart} variables], link:{URLInstallationGuide}/appendix-inventory-files-vars#ref-hub-variables[{HubNameStart} variables], and link:{URLInstallationGuide}/appendix-inventory-files-vars#event-driven-ansible-controller[{EDAcontroller} variables] sections of link:{LinkInstallationGuide} for guidance on setting these variables. + -- ** {ControllerNameStart}: `web_server_ssl_cert`, `web_server_ssl_key`, `custom_ca_cert` diff --git a/downstream/modules/platform/proc-choosing-obtaining-installer.adoc b/downstream/modules/platform/proc-choosing-obtaining-installer.adoc index 9050b7395b..6fc5f7e3dd 100644 --- a/downstream/modules/platform/proc-choosing-obtaining-installer.adoc +++ b/downstream/modules/platform/proc-choosing-obtaining-installer.adoc @@ -1,6 +1,6 @@ -// [id="proc-choosing-obtaining-installer_{context}"] +[id="proc-choosing-obtaining-installer_{context}"] = Choosing and obtaining a {PlatformName} installer @@ -13,14 +13,16 @@ Choose the {PlatformName} installer you need based on your {RHEL} environment in A valid Red Hat customer account is required to access {PlatformName} installer downloads on the Red Hat Customer Portal. ==== -.Installing with internet access +== Installing with internet access Choose the {PlatformName} installer if your {RHEL} environment is connected to the internet. Installing with internet access retrieves the latest required repositories, packages, and dependencies. Choose one of the following ways to set up your {PlatformNameShort} installer. *Tarball install* +.Procedure + . Navigate to the link:{PlatformDownloadUrl}[{PlatformName} download] page. -. Click btn:[Download Now] for the *Ansible Automation Platform Setup*. +. In the *Product software* tab, click btn:[Download Now] for the *Ansible Automation Platform Setup*. . Extract the files: + ----- @@ -29,31 +31,36 @@ $ tar xvzf ansible-automation-platform-setup-.tar.gz *RPM install* -. Install {PlatformNameShort} Installer Package +.Procedure + +. Install the {PlatformNameShort} Installer Package. + -v.{PlatformVers} for RHEL 8 for x86_64 +v.{PlatformVers} for RHEL 8 for x86_64: + ---- -$ sudo dnf install --enablerepo=ansible-automation-platform-2.4-for-rhel-8-x86_64-rpms ansible-automation-platform-installer +$ sudo dnf install --enablerepo=ansible-automation-platform-2.5-for-rhel-8-x86_64-rpms ansible-automation-platform-installer ---- + -v.{PlatformVers} for RHEL 9 for x86-64 +v.{PlatformVers} for RHEL 9 for x86_64: + ---- -$ sudo dnf install --enablerepo=ansible-automation-platform-2.4-for-rhel-9-x86_64-rpms ansible-automation-platform-installer +$ sudo dnf install --enablerepo=ansible-automation-platform-2.5-for-rhel-9-x86_64-rpms ansible-automation-platform-installer ---- - [NOTE] +==== `dnf install` enables the repo as the repo is disabled by default. +==== When you use the RPM installer, the files are placed under the `/opt/ansible-automation-platform/installer` directory. -.Installing without internet access +== Installing without internet access Use the {PlatformName} *Bundle* installer if you are unable to access the internet, or would prefer not to install separate components and dependencies from online repositories. Access to {RHEL} repositories is still needed. All other dependencies are included in the tar archive. +.Procedure + . Navigate to the link:{PlatformDownloadUrl}[{PlatformName} download] page. -.
Click btn:[Download Now] for the *Ansible Automation Platform Setup Bundle*.
+. In the *Product software* tab, click btn:[Download Now] for the *Ansible Automation Platform Setup Bundle*.
 . Extract the files:
 +
 -----
diff --git a/downstream/modules/platform/proc-cli-get-controller-pwd.adoc b/downstream/modules/platform/proc-cli-get-controller-pwd.adoc
index 5426d2a5a2..636fe6192b 100644
--- a/downstream/modules/platform/proc-cli-get-controller-pwd.adoc
+++ b/downstream/modules/platform/proc-cli-get-controller-pwd.adoc
@@ -4,24 +4,24 @@
 [id="proc-cli-get-controller-pwd{context}"]
-= Fetching {ControllerNameStart} login details from the {OCPShort} CLI
+= Fetching {Gateway} login details from the {OCPShort} CLI
-To login to the {ControllerNameStart}, you need the web address and the password.
+To log in to the {Gateway}, you need the web address and the password.
-== Fetching the {ControllerName} web address
+== Fetching the {Gateway} web address
 A {OCP} route exposes a service at a host name, so that external clients can reach it by name.
-When you created the {ControllerName} instance, a route was created for it.
-The route inherits the name that you assigned to the {ControllerName} object in the YAML file.
+When you created the {Gateway} instance, a route was created for it.
+The route inherits the name that you assigned to the {Gateway} object in the YAML file.
 Use the following command to fetch the routes:
 [subs="+quotes"]
-----
-oc get routes -n ____
+oc get routes -n ____
-----
-In the following example, the `_example_` {ControllerName} is running in the `_ansible-automation-platform_` namespace.
+In the following example, the `_example_` {Gateway} is running in the `_ansible-automation-platform_` namespace.
-----
$ oc get routes -n ansible-automation-platform
@@ -30,27 +30,28 @@ NAME HOST/PORT PATH SERVICES
example example-ansible-automation-platform.apps-crc.testing example-service http edge/Redirect None
-----
-The address for the {ControllerName} instance is `example-ansible-automation-platform.apps-crc.testing`.
+The address for the {Gateway} instance is `example-ansible-automation-platform.apps-crc.testing`.
-== Fetching the {ControllerName} password
+== Fetching the {Gateway} password
-The YAML block for the {ControllerName} instance in [filename]`sub.yaml` assigns values to the _name_ and _admin_user_ keys.
-Use these values in the following command to fetch the password for the {ControllerName} instance.
+The YAML block for the {Gateway} instance in the `AnsibleAutomationPlatform` object assigns values to the _name_ and _admin_user_ keys.
+. Use these values in the following command to fetch the password for the {Gateway} instance.
++
-----
-oc get secret/--password -o yaml
+oc get secret/--password -o yaml
-----
-
-The default value for _admin_user_ is `_admin_`. Modify the command if you changed the admin username in [filename]`sub.yaml`.
-
-The following example retrieves the password for an {ControllerName} object called `_example_`:
-
++
+. The default value for _admin_user_ is `_admin_`. Modify the command if you changed the admin username in the `AnsibleAutomationPlatform` object.
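+As a convenience, a sketch that captures the decoded password in a shell variable in one step, assuming the same `example` instance as above (decoding the base64 value is described later in this module):
+
+----
+$ ADMIN_PASSWORD=$(oc get secret/example-admin-password -o jsonpath='{.data.password}' | base64 --decode)
+----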
++ +The following example retrieves the password for a {Gateway} object called `_example_`: ++ ----- oc get secret/example-admin-password -o yaml ----- - -The password for the {ControllerName} instance is listed in the `metadata` field in the output: - ++ +The base64 encoded password for the {Gateway} instance is listed in the `metadata` field in the output: ++ ----- $ oc get secret/example-admin-password -o yaml @@ -59,20 +60,23 @@ data: password: ODzLODzLODzLODzLODzLODzLODzLODzLODzLODzLODzL kind: Secret metadata: - annotations: - kubectl.kubernetes.io/last-applied-configuration: '{"apiVersion":"v1","kind":"Secret","metadata":{"labels":{"app.kubernetes.io/component":"automationcontroller","app.kubernetes.io/managed-by":"automationcontroller-operator","app.kubernetes.io/name":"example","app.kubernetes.io/operator-version":"","app.kubernetes.io/part-of":"example"},"name":"example-admin-password","namespace":"ansible-automation-platform"},"stringData":{"password":"88TG88TG88TG88TG88TG88TG88TG88TG"}}' - creationTimestamp: "2021-11-03T00:02:24Z" labels: - app.kubernetes.io/component: automationcontroller - app.kubernetes.io/managed-by: automationcontroller-operator + app.kubernetes.io/component: aap app.kubernetes.io/name: example app.kubernetes.io/operator-version: "" app.kubernetes.io/part-of: example name: example-admin-password namespace: ansible-automation-platform - resourceVersion: "185185" - uid: 39393939-5252-4242-b929-665f665f665f ------ +----- + +== Decoding the {Gateway} password + +After you have found your gateway password, you must decode it from base64. -For this example, the password is `88TG88TG88TG88TG88TG88TG88TG88TG`. +* Run the following command to decode your password from base64: ++ +---- +oc get secret/example-admin-password -o jsonpath={.data.password} | base64 --decode +---- ++ diff --git a/downstream/modules/platform/proc-completing-post-installation-tasks.adoc b/downstream/modules/platform/proc-completing-post-installation-tasks.adoc index 12dc9cf221..d6241bedf8 100644 --- a/downstream/modules/platform/proc-completing-post-installation-tasks.adoc +++ b/downstream/modules/platform/proc-completing-post-installation-tasks.adoc @@ -3,75 +3,17 @@ = Completing post installation tasks [role="_abstract"] -After you have completed the installation of {PlatformNameShort}, ensure that {HubName} and {ControllerName} deploy properly. - - -== Adding a controller subscription - - - -.Procedure - -. Navigate to the FQDN of the {ControllerNameStart}. Log in with the username admin and the password you specified as `admin_password` in your inventory file. - -. Click btn:[Browse] and select the __manifest.zip__ you created earlier. - -. Click btn:[Next]. - -. Uncheck btn:[User analytics] and btn:[Automation analytics]. These rely on an internet connection and must be turned off. - -. Click btn:[Next]. - -. Read the End User License Agreement and click btn:[Submit] if you agree. - -== Updating the CA trust store -As part of your post-installation tasks, you must update the software's certificates. -By default, {PlatformNameShort} {HubName} and {ControllerName} are installed using self-signed certificates. Because of this, the controller does not trust the hub’s certificate and will not download the {ExecEnvShort} from the hub. - -To ensure that automation controller downloads the execution environment from automation hub, you must import the hub’s Certificate Authority (CA) certificate as a trusted certificate on the controller. 
You can do this in one of two ways, depending on whether SSH is available as root user between {ControllerName} and {PrivateHubName}. - -=== Using secure copy (SCP) as a root user - -If SSH is available as the root user between the controller and {PrivateHubName}, use SCP to copy the root certificate on the {PrivateHubName} to the controller. - - -.Procedure - - . Run `update-ca-trust` on the controller to update the CA trust store: - ----- -$ sudo -i -# scp :/etc/pulp/certs/root.crt -/etc/pki/ca-trust/source/anchors/automationhub-root.crt -# update-ca-trust ----- - -=== Copying and pasting as a non root user - -If SSH is unavailable as root between the {PrivateHubName} and the controller, copy the contents of the file __/etc/pulp/certs/root.crt__ on the {PrivateHubName} and paste it into a new file on the controller called __/etc/pki/ca-trust/source/anchors/automationhub-root.crt__. - -.Procedure - -. Run `update-ca-trust` to update the CA trust store with the new certificate. On the {PrivateHubName}, run: +After you have completed the installation of {PlatformNameShort}, ensure that {HubName} and {ControllerName} deploy properly. ----- -$ sudo -i -# cat /etc/pulp/certs/root.crt -(copy the contents of the file, including the lines with 'BEGIN CERTIFICATE' and -'END CERTIFICATE') ----- +Before your first login, you must add your subscription information to the platform. To obtain your subscription information in uploadable form, see link:{URLCentralAuth}/assembly-gateway-licensing#assembly-aap-obtain-manifest-files[Obtaining a manifest file] in _{TitleCentralAuth}_. -. On the {ControllerName}: +Once you have obtained your subscription manifest, see link:{LinkGettingStarted} for instructions on how to upload your subscription information. ----- -$ sudo -i -# vi /etc/pki/ca-trust/source/anchors/automationhub-root.crt -(paste the contents of the root.crt file from the private automation hub into the new file and write to disk) -# update-ca-trust ----- +Now that you have successfully installed Ansible Automation Platform, to begin using its features, see the following guides for your next steps: +link:{LinkGettingStarted}. -.Additional Resources +link:{LinkHubManagingContent}. -* For further information on unknown certificate authority, see link:https://access.redhat.com/solutions/6707451[Project sync fails with unknown certificate authority error in {PlatformNameShort} 2.1]. +link:{LinkBuilder}. 
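+As an optional quick check before your first login on an RPM-based installation, you can confirm that the platform services are running. A sketch, assuming the service wrapper that ships with {ControllerName} is available on the host:
+
+----
+$ sudo automation-controller-service status
+----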
diff --git a/downstream/modules/platform/proc-configure-controller-OCP.adoc b/downstream/modules/platform/proc-configure-controller-OCP.adoc
index b7cd1b2d6d..59e86da183 100644
--- a/downstream/modules/platform/proc-configure-controller-OCP.adoc
+++ b/downstream/modules/platform/proc-configure-controller-OCP.adoc
@@ -6,12 +6,12 @@ Make the following configuration changes in {ControllerName} to minimize downtim
 .Prerequisites
-* {PlatformNameShort} 2.4
-* Ansible {ControllerName} 4.4
-* {OCPShort}
-** > 4.10.42
-** > 4.11.16
-** > 4.12.0
+* {PlatformNameShort} 2.4 or later
+* Ansible {ControllerName} 4.4 or later
+* {OCPShort}:
+** Later than 4.10.42
+** Later than 4.11.16
+** Later than 4.12.0
 * High availability (HA) deployment of Postgres
 * Multiple worker node that {ControllerName} pods can be scheduled on
diff --git a/downstream/modules/platform/proc-configure-known-proxies.adoc b/downstream/modules/platform/proc-configure-known-proxies.adoc
index 00625167cb..13d1e6d954 100644
--- a/downstream/modules/platform/proc-configure-known-proxies.adoc
+++ b/downstream/modules/platform/proc-configure-known-proxies.adoc
@@ -5,26 +5,28 @@
 [role="_abstract"]
-To configure a list of known proxies for your {ControllerName}, add the proxy IP addresses to the *PROXY_IP_ALLOWED_LIST* field in the settings page for your {ControllerName}.
+To configure a list of known proxies for your {ControllerName}, add the proxy IP addresses to the *Proxy IP Allowed List* field in the System Settings page.
 .Procedure
-//[ddacosta] Need to verify that in 2.5 this is Settings[System]...
-. On your {ControllerName}, navigate to {MenuAEAdminSettings} and select *Miscellaneous System settings* from the list of *System* options.
-. In the *PROXY_IP_ALLOWED_LIST* field, enter IP addresses that are allowed to connect to your {ControllerName}, following the syntax in the example below:
+//[ddacosta] The Settings > System configurations are for controller only, so don't change ControllerName to PlatformName.
+. From the navigation panel, select {MenuSetSystem}.
+. In the *Proxy IP Allowed List* field, enter IP addresses that are allowed to connect to your {ControllerName}, following the syntax in the example below:
 +
-.Example *PROXY_IP_ALLOWED_LIST* entry
+.Example *Proxy IP Allowed List* entry
----
[
 "example1.proxy.com:8080",
 "example2.proxy.com:8080"
]
----
-
++
 [IMPORTANT]
 ====
-* `PROXY_IP_ALLOWED_LIST` requires proxies in the list are properly sanitizing header input and correctly setting an ``X-Forwarded-For`` value equal to the real source IP of the client. {ControllerNameStart} can rely on the IP addresses and hostnames in `PROXY_IP_ALLOWED_LIST` to provide non-spoofed values for the `X-Forwarded-For` field.
-* Do not configure `HTTP_X_FORWARDED_FOR` as an item in `REMOTE_HOST_HEADERS`unless *all* of the following conditions are satisfied:
+* *Proxy IP Allowed List* requires that proxies in the list properly sanitize header input and correctly set an ``X-Forwarded-For`` value equal to the real source IP of the client.
{ControllerNameStart} can rely on the IP addresses and hostnames in *Proxy IP Allowed List* to provide non-spoofed values for `X-Forwarded-For`.
+* Do not configure `HTTP_X_FORWARDED_FOR` as an item in *Remote Host Headers* unless *all* of the following conditions are satisfied:
 ** You are using a proxied environment with ssl termination;
 ** The proxy provides sanitization or validation of the `X-Forwarded-For` header to prevent client spoofing;
 ** `/etc/tower/conf.d/remote_host_headers.py` defines `PROXY_IP_ALLOWED_LIST` that contains only the originating IP addresses of trusted proxies or load balancers.
 ====
++
+. Click btn:[Save] to save the settings.
diff --git a/downstream/modules/platform/proc-configure-ldap-hub-ocp.adoc b/downstream/modules/platform/proc-configure-ldap-hub-ocp.adoc
index 4d44ab753d..80aa683bf5 100644
--- a/downstream/modules/platform/proc-configure-ldap-hub-ocp.adoc
+++ b/downstream/modules/platform/proc-configure-ldap-hub-ocp.adoc
@@ -34,10 +34,7 @@ spec:
----
 [NOTE]
-
 ====
-
 Do not leave any fields empty. For fields with no variable, enter ```` to indicate a default value.
-
 ====
diff --git a/downstream/modules/platform/proc-configure-upgraded-aap.adoc b/downstream/modules/platform/proc-configure-upgraded-aap.adoc
index f3d37901d5..ed709e87d7 100644
--- a/downstream/modules/platform/proc-configure-upgraded-aap.adoc
+++ b/downstream/modules/platform/proc-configure-upgraded-aap.adoc
@@ -6,7 +6,7 @@
 After upgrading your {PlatformName} instance, associate your original instances to its corresponding instance groups by configuring settings in the {ControllerName} UI:
-. Log into the new Controller instance.
+. Log in to the new Controller instance.
 . Content from old instance, such as credentials, jobs, inventories should now be visible on your Controller instance.
 . Navigate to {MenuInfrastructureInstanceGroups}.
 . Associate execution nodes by clicking on an instance group, then click the *Instances* tab.
diff --git a/downstream/modules/platform/proc-configuring-controller-image-pull-policy.adoc b/downstream/modules/platform/proc-configuring-controller-image-pull-policy.adoc
index 0fea68049b..7c34ed6721 100644
--- a/downstream/modules/platform/proc-configuring-controller-image-pull-policy.adoc
+++ b/downstream/modules/platform/proc-configuring-controller-image-pull-policy.adoc
@@ -5,7 +5,14 @@ Use this procedure to configure the image pull policy on your {ControllerName}.
 .Procedure
-. Under *Image Pull Policy*, click on the radio button to select
+. Log in to {OCP}.
+. Go to menu:Operators[Installed Operators].
+. Select your {OperatorPlatformNameShort} deployment.
+. Select the *Automation Controller* tab.
+. For new instances, click btn:[Create AutomationController].
+.. For existing instances, you can edit the YAML view by clicking the {MoreActionsIcon} icon and then btn:[Edit AutomationController].
+. Click btn:[Advanced configuration].
+Under *Image Pull Policy*, click the radio button to select
* *Always*
* *Never*
* *IfNotPresent*
diff --git a/downstream/modules/platform/proc-configuring-controller-ldap-security.adoc b/downstream/modules/platform/proc-configuring-controller-ldap-security.adoc
index 94bdcdaf0e..6e0b6cf5f0 100644
--- a/downstream/modules/platform/proc-configuring-controller-ldap-security.adoc
+++ b/downstream/modules/platform/proc-configuring-controller-ldap-security.adoc
@@ -1,7 +1,12 @@
 [id="proc_configuring-controller-ldap-security_{context}"]
 = Configuring your controller LDAP security
-Use this procedure to configure LDAP security for your {ControllerName}.
+
+You can manage your LDAP SSL configuration for {ControllerName} through any of the following options:
+
+* The {ControllerName} user interface.
+* The {Gateway} user interface. See the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/access_management_and_authentication/index#controller-set-up-LDAP[Configuring LDAP authentication] section of the _Access management and authentication_ guide for additional steps.
+* The steps in the following procedure.
 .Procedure
 . If you do not have a `ldap_cacert_secret`, you can create one with the following command:
@@ -22,7 +27,7 @@ data:
 kind: Secret
 metadata:
   name: mycerts
-  namespace: awx
+  namespace: AutomationController
 type: Opaque
----
<1> {ControllerNameStart} looks for the data field `ldap-ca.crt` in the specified secret when using the `ldap_cacert_secret`.
diff --git a/downstream/modules/platform/proc-configuring-controller-route-options.adoc b/downstream/modules/platform/proc-configuring-controller-route-options.adoc
index 260c3f95b7..ceb58068f2 100644
--- a/downstream/modules/platform/proc-configuring-controller-route-options.adoc
+++ b/downstream/modules/platform/proc-configuring-controller-route-options.adoc
@@ -5,6 +5,12 @@ The {PlatformName} operator installation form allows you to further configure your {ControllerName} operator route options under *Advanced configuration*.
 .Procedure
+. Log in to {OCP}.
+. Navigate to menu:Operators[Installed Operators].
+. Select your {OperatorPlatformNameShort} deployment.
+. Select the *Automation Controller* tab.
+. For new instances, click btn:[Create AutomationController].
+.. For existing instances, you can edit the YAML view by clicking the {MoreActionsIcon} icon and then btn:[Edit AutomationController].
 . Click btn:[Advanced configuration].
 . Under *Ingress type*, click the drop-down menu and select *Route*.
 . Under *Route DNS host*, enter a common host name that the route answers to.
diff --git a/downstream/modules/platform/proc-configuring-reverse-proxy.adoc b/downstream/modules/platform/proc-configuring-reverse-proxy.adoc
index 3a854d38c8..5d8830a997 100644
--- a/downstream/modules/platform/proc-configuring-reverse-proxy.adoc
+++ b/downstream/modules/platform/proc-configuring-reverse-proxy.adoc
@@ -7,13 +7,13 @@
 = Configuring a reverse proxy
 [role="_abstract"]
-You can support a reverse proxy server configuration by adding `HTTP_X_FORWARDED_FOR` to the *REMOTE_HOST_HEADERS* field in your {ControllerName} settings. The ``X-Forwarded-For`` (XFF) HTTP header field identifies the originating IP address of a client connecting to a web server through an HTTP proxy or load balancer.
+You can support a reverse proxy server configuration by adding `HTTP_X_FORWARDED_FOR` to the *Remote Host Headers* field in the System Settings.
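+For illustration, a request that passes through a single trusted proxy might arrive with a header such as the following, where the value is a hypothetical original client address:
+
+----
+X-Forwarded-For: 203.0.113.15
+----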
The ``X-Forwarded-For`` (XFF) HTTP header field identifies the originating IP address of a client connecting to a web server through an HTTP proxy or load balancer.
 .Procedure
-//[ddacosta] Need to verify that in 2.5 this is Settings[System]...
-. On your {ControllerName}, navigate to {MenuAEAdminSettings} and select *Miscellaneous System settings* from the list of *System* options.
-. In the *REMOTE_HOST_HEADERS* field, enter the following values:
+//[ddacosta] Settings > System are controller specific for 2.5EA so don't change ControllerName to PlatformName.
+. From the navigation panel, select {MenuSetSystem}.
+. In the *Remote Host Headers* field, enter the following values:
 +
----
[
@@ -22,9 +22,11 @@ You can support a reverse proxy server configuration by adding `HTTP_X_FORWARDED
 "REMOTE_HOST"
]
----
++
 . Add the lines below to ``/etc/tower/conf.d/custom.py`` to ensure the application uses the correct headers:
-
++
----
USE_X_FORWARDED_PORT = True
USE_X_FORWARDED_HOST = True
----
+. Click btn:[Save] to save the settings.
diff --git a/downstream/modules/platform/proc-configuring-the-metrics-utility.adoc b/downstream/modules/platform/proc-configuring-the-metrics-utility.adoc
new file mode 100644
index 0000000000..30f295f415
--- /dev/null
+++ b/downstream/modules/platform/proc-configuring-the-metrics-utility.adoc
@@ -0,0 +1,172 @@
+:_newdoc-version: 2.18.3
+:_template-generated: 2024-07-15
+:_mod-docs-content-type: PROCEDURE
+
+[id="configuring-the-metrics-utility"]
+= Configuring metrics-utility
+
+== On {RHEL}
+
+.Prerequisites
+
+* An active {PlatformNameShort} subscription
+
+Metrics-utility is included with {PlatformNameShort}, so you do not need a separate installation.
+The following commands gather the relevant data and generate a link:https://connect.redhat.com/en/programs/certified-cloud-service-provider[CCSP] report containing your usage metrics.
+You can configure these commands as cronjobs to ensure they run at the beginning of every month.
+See link:https://www.redhat.com/sysadmin/linux-cron-command[How to schedule jobs using the Linux 'cron' utility] for more information about configuring jobs by using cron syntax.
+
+.Procedure
+
+. In the cron file, set the following variables to ensure `metrics-utility` gathers the relevant data. To open the cron file for editing, run:
++
+[source,bash]
+----
+crontab -e
+----
++
+. Specify the following variables to indicate where the report is deposited in your file system:
++
+[source,bash]
+----
+export METRICS_UTILITY_SHIP_TARGET=directory
+export METRICS_UTILITY_SHIP_PATH=/awx_devel/awx-dev/metrics-utility/shipped_data/billing
+----
++
+. Set these variables to generate a report:
++
+[source,bash]
+----
+export METRICS_UTILITY_REPORT_TYPE=CCSP
+export METRICS_UTILITY_PRICE_PER_NODE=11.55 # in USD
+export METRICS_UTILITY_REPORT_SKU=MCT3752MO
+export METRICS_UTILITY_REPORT_SKU_DESCRIPTION="EX: Red Hat Ansible Automation Platform, Full Support (1 Managed Node, Dedicated, Monthly)"
+export METRICS_UTILITY_REPORT_H1_HEADING="CCSP Reporting : ANSIBLE Consumption"
+export METRICS_UTILITY_REPORT_COMPANY_NAME="Company Name"
+export METRICS_UTILITY_REPORT_EMAIL="email@email.com"
+export METRICS_UTILITY_REPORT_RHN_LOGIN="test_login"
+export METRICS_UTILITY_REPORT_COMPANY_BUSINESS_LEADER="BUSINESS LEADER"
+export METRICS_UTILITY_REPORT_COMPANY_PROCUREMENT_LEADER="PROCUREMENT LEADER"
+----
++
+. 
Add the following parameter to gather and store the data in the provided SHIP_PATH directory in the `./report_data` subdirectory:
++
+[source,bash]
+----
+metrics-utility gather_automation_controller_billing_data --ship --until=10m
+----
++
+. To configure the run schedule, add the following parameters to the end of the file and specify how often you want `metrics-utility` to gather information and build a report using link:https://www.redhat.com/sysadmin/linux-cron-command[cron syntax]. In the following example, the `gather` command is configured to run every hour at 00 minutes. The `build_report` command is configured to run every second day of each month at 4:00 AM.
++
+[source,bash]
+----
+0 */1 * * * metrics-utility gather_automation_controller_billing_data --ship --until=10m
+0 4 2 * * metrics-utility build_report
+----
++
+. Save and close the file.
+. To verify that you saved your changes, run:
++
+[source,bash]
+----
+crontab -l
+----
++
+. You can also check the logs to ensure that data is being collected. Run:
++
+[source,bash]
+----
+cat /var/log/cron
+----
++
+The following is an example of the output. Note that time and date might vary depending on how you configure the run schedule:
++
+[source,text]
+----
+May 8 09:45:03 ip-10-0-6-23 CROND[51623]: (root) CMDOUT (No billing data for month: 2024-04)
+May 8 09:45:03 ip-10-0-6-23 CROND[51623]: (root) CMDEND (metrics-utility build_report)
+May 8 09:45:19 ip-10-0-6-23 crontab[51619]: (root) END EDIT (root)
+May 8 09:45:34 ip-10-0-6-23 crontab[51659]: (root) BEGIN EDIT (root)
+May 8 09:46:01 ip-10-0-6-23 CROND[51688]: (root) CMD (metrics-utility gather_automation_controller_billing_data --ship --until=10m)
+May 8 09:46:03 ip-10-0-6-23 CROND[51669]: (root) CMDOUT (/tmp/9e3f86ee-c92e-4b05-8217-72c496e6ffd9-2024-05-08-093402+0000-2024-05-08-093602+0000-0.tar.gz)
+May 8 09:46:03 ip-10-0-6-23 CROND[51669]: (root) CMDEND (metrics-utility gather_automation_controller_billing_data --ship --until=10m)
+May 8 09:46:26 ip-10-0-6-23 crontab[51659]: (root) END EDIT (root)
+----
++
+. Run the following command to build a report for the previous month:
++
+[source,bash]
+----
+metrics-utility build_report
+----
++
+The generated report has the default name CCSP--.xlsx and is deposited in the ship path that you specified in step 2.
+
+== On {OCPShort} from the {PlatformNameShort} operator
+
+Metrics-utility is included in the {OCPShort} image beginning with version 4.12. If your system does not have `metrics-utility` installed, update your OpenShift image to the latest version.
+
+Follow the steps below to configure the run schedule for `metrics-utility` on {OCPShort} using the {PlatformNameShort} operator.
+
+.Prerequisites
+* A running OpenShift cluster
+* An operator-based installation of {PlatformNameShort} on {OCPShort}.
+
+NOTE: Metrics-utility will run as indicated by the parameters you set in the configuration file. The utility cannot be run manually on {OCPShort}.
+
+=== Create a ConfigMap in the OpenShift UI YAML view
+
+. From the navigation panel on the left side, select *ConfigMaps*, and then click the *Create ConfigMap* button.
+. On the next screen, select the *YAML view* tab.
+. 
In the `YAML` field, enter the following parameters with the appropriate variables set:
++
+[source,yaml]
+----
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: automationcontroller-metrics-utility-configmap
+data:
+  METRICS_UTILITY_SHIP_TARGET: directory
+  METRICS_UTILITY_SHIP_PATH: /metrics-utility
+  METRICS_UTILITY_REPORT_TYPE: CCSP
+  METRICS_UTILITY_PRICE_PER_NODE: '11' # in USD
+  METRICS_UTILITY_REPORT_SKU: MCT3752MO
+  METRICS_UTILITY_REPORT_SKU_DESCRIPTION: "EX: Red Hat Ansible Automation Platform, Full Support (1 Managed Node, Dedicated, Monthly)"
+  METRICS_UTILITY_REPORT_H1_HEADING: "CCSP Reporting : ANSIBLE Consumption"
+  METRICS_UTILITY_REPORT_COMPANY_NAME: "Company Name"
+  METRICS_UTILITY_REPORT_EMAIL: "email@email.com"
+  METRICS_UTILITY_REPORT_RHN_LOGIN: "test_login"
+  METRICS_UTILITY_REPORT_COMPANY_BUSINESS_LEADER: "BUSINESS LEADER"
+  METRICS_UTILITY_REPORT_COMPANY_PROCUREMENT_LEADER: "PROCUREMENT LEADER"
+----
++
+. Click btn:[Create].
+. To verify that the ConfigMap was created and the metrics utility is installed, select *ConfigMap* from the navigation panel and look for your ConfigMap in the list.
+
+
+=== Deploy {ControllerName}
+
+Deploy {ControllerName} and specify variables for how often `metrics-utility` gathers usage information and generates a report.
+
+. From the navigation panel, select *Installed Operators*.
+. Select {PlatformNameShort}.
+. In the Operator details, select the *{ControllerName}* tab.
+. Click btn:[Create {ControllerName}].
+. Select the *YAML view* option. The YAML view now shows the default parameters for {ControllerName}.
+The relevant parameters for `metrics-utility` are the following:
++
+[cols="50%,50%",options="header"]
+|====
+| *Parameter* | *Variable*
+| *`metrics_utility_enabled`* | True.
+| *`metrics_utility_cronjob_gather_schedule`* | @hourly or @daily.
+| *`metrics_utility_cronjob_report_schedule`* | @daily or @monthly.
+|====
++
+. Find the `metrics_utility_enabled` parameter and change the variable to `true`.
+. Find the `metrics_utility_cronjob_gather_schedule` parameter and enter a variable for how often the utility should gather usage information (for example, @hourly or @daily).
+. Find the `metrics_utility_cronjob_report_schedule` parameter and enter a variable for how often the utility generates a report (for example, @daily or @monthly).
+. Click btn:[Create].
diff --git a/downstream/modules/platform/proc-connecting-nodes-through-mesh-ingress.adoc b/downstream/modules/platform/proc-connecting-nodes-through-mesh-ingress.adoc
index de67cef383..ff0fef65dd 100644
--- a/downstream/modules/platform/proc-connecting-nodes-through-mesh-ingress.adoc
+++ b/downstream/modules/platform/proc-connecting-nodes-through-mesh-ingress.adoc
@@ -19,53 +19,36 @@ Use the following procedure to set up mesh nodes.
 .Procedure
-. Create a YAML file to set up the mesh ingress node.
+. Create a YAML file (in this case named `oc_meshingress.yml`) to set up the mesh ingress node.
 +
-The file resembles the following:
+Your file should resemble the following:
 +
----
-apiVersion:
+apiVersion: automationcontroller.ansible.com/v1alpha1
 kind: AutomationControllerMeshIngress
 metadata:
   name:
   namespace:
 spec:
-  deployment name:
+  deployment_name: aap-controller
----
+
Where:
* *apiVersion*: defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and might reject unrecognized values.
+This value is static.
* *kind*: Is the type of node to create.
-Set the value to `AutomationControllerMeshIngress`.
-`AutomationControllerMeshIngress` controls the deployment and configuration of mesh ingress on {ControllerName}.
-* *name*: is the name of the mesh ingress node.
-* *namespace*: is which Kubernetes namespace to deploy the mesh ingress into.
-This must be in the same namespace as the {ControllerName} that the mesh is connected to
-* *deployment_name*: You can find the deployment name by using:
+
-----
-oc get ansible-automation-platform.
-----
+Use the value `AutomationControllerMeshIngress`.
+
-//Additionally you can use:
+`AutomationControllerMeshIngress` controls the deployment and configuration of mesh ingress on {ControllerName}.
+* *name*: enter a name for the mesh ingress node.
+* *namespace*: enter a name for the Kubernetes namespace to deploy the mesh ingress into.
+
-//* *external_hostname*: an optional field used for specifying the external hostname defined in an user managed ingress.
-//* *external_ipaddress*: an optional field used for specifying the external IP address defined in an user managed ingress
-//* *ingress_type*: Ingress type for ingress managed by the operator.
-//Where options are:
-//** none (default)
-//** Ingress
-//** IngressRouteTCP
-//** Route (default when deployed on OpenShift)
-//* *ingress_api_version*: the API Version for ingress managed by the operator.
-//This parameter is ignored when `ingress_type=Route`.
-//* *ingress_annotations*: annotation on the ingress managed by the operator
-//* *ingress_class_name*: the name of ingress class to use instead of the cluster default.
-//This parameter is ignored when `ingress_type=Route`.
-//* *ingress_controller*: special configuration for specific Ingress Controllers.
-//This parameter is ignored when `ingress_type=Route`.
+This must be in the same namespace as the {ControllerName} that the mesh is connected to.
+* *deployment_name*: is the {ControllerName} instance that this mesh ingress is attached to.
+Provide the name of your {ControllerName} instance.
 . Apply this YAML file using:
 +
@@ -73,8 +56,27 @@ oc get ansible-automation-platform.
 oc apply -f oc_meshingress.yml
----
 +
-This runs the playbook associated with `AutomationControllerMeshIngress`, and creates the hop node called ``.
+Applying this file creates the `AutomationControllerMeshIngress` resource.
+The operator creates a hop node in {ControllerName} with the `name` you supplied.
 . When the MeshIngress instance has been created, it appears in the Instances page.
-
-
++
+[IMPORTANT]
+====
+Any instance that is to function as a remote execution node in "pull" mode needs to be created after this procedure and must be configured as follows:
+----
+instance type: Execution
+listener port: keep empty
+options:
+  Enable instance: checked
+  Managed by Policy: as needed
+  Peers from control nodes: unchecked (this one is important)
+----
+====
+. Associate this new instance with the hop node that you created in the preceding procedure.
+. Download the tarball.
++
+[NOTE]
+====
+Association with the hop node must be done before creating the tarball.
+==== \ No newline at end of file diff --git a/downstream/modules/platform/proc-control-data-collection.adoc b/downstream/modules/platform/proc-control-data-collection.adoc index 12f09d5952..d8c2fa3be6 100644 --- a/downstream/modules/platform/proc-control-data-collection.adoc +++ b/downstream/modules/platform/proc-control-data-collection.adoc @@ -3,15 +3,12 @@ = Controlling data collection from {ControllerName} [role="_abstract"] -You can control how {ControllerName} collects data by setting your participation level in the *User Interface* tab in the *Settings* menu. +You can control how {ControllerName} collects data from the {MenuSetSystem} menu. .Procedure . Log in to your {ControllerName}. -//[ddacosta]I don't see an equivalent in 2.5, need to verify where it gets added -. Navigate to {MenuAEAdminSettings} and select *User Interface settings* from the *User Interface* option. -. Select the desired level of data collection from the *User Analytics Tracking State* drop-down list: -** *Off*: Prevents any data collection. -** *Anonymous*: Enables data collection without your specific user data. -** *Detailed*: Enables data collection including your specific user data. -. Click btn:[Save] to apply the settings or btn:[Cancel] to discard the changes. +. From the navigation panel, select {MenuSetSystem}. +. Select *Gather data for Automation Analytics* to enable {ControllerName} to gather data on automation and send it to Automation Analytics. + + \ No newline at end of file diff --git a/downstream/modules/platform/proc-controller-access-topology-viewer.adoc b/downstream/modules/platform/proc-controller-access-topology-viewer.adoc index e88f204bb1..77ba1c06d7 100644 --- a/downstream/modules/platform/proc-controller-access-topology-viewer.adoc +++ b/downstream/modules/platform/proc-controller-access-topology-viewer.adoc @@ -22,9 +22,9 @@ To reset the view to its default view, click the *Reset view* (image:reset.png[R . Refer to the *Legend* to identify the type of nodes that are represented. + -For VM-based installations, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_automation_mesh_guide_for_vm-based_installations/assembly-planning-mesh#con-automation-mesh-node-types[Control and execution planes] +For VM-based installations, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_mesh_for_vm_environments/assembly-planning-mesh#con-automation-mesh-node-types[Control and execution planes]. + -For operator-based installations, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_automation_mesh_for_operator-based_installations/assembly-planning-mesh#con-automation-mesh-node-types[Control and execution planes] for more information about each type of node. +For operator-based installations, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_mesh_for_managed_cloud_or_operator_environments/assembly-planning-mesh#con-automation-mesh-node-types[Control and execution planes] for more information about each type of node. + //Not relevant in the 2.5 UI: //[NOTE] @@ -55,4 +55,4 @@ You can use the *Details* page to remove the instance, run a health check on the However, you can disable it to exclude the node from having any jobs running on it. . Additional resources -For more information about creating new nodes and scaling the mesh, see xref:assembly-controller-instances[Managing Capacity with Instances]. 
+For more information about creating new nodes and scaling the mesh, see xref:assembly-controller-instances[Managing capacity with Instances]. diff --git a/downstream/modules/platform/proc-controller-activity-stream.adoc b/downstream/modules/platform/proc-controller-activity-stream.adoc index e90f2b74f3..8222ea38e8 100644 --- a/downstream/modules/platform/proc-controller-activity-stream.adoc +++ b/downstream/modules/platform/proc-controller-activity-stream.adoc @@ -8,8 +8,10 @@ Most screens have an Activity Stream image:activitystream.png[activitystream,15, image:users-activity-stream.png[Activity Stream] An Activity Stream shows all changes for a particular object. + For each change, the Activity Stream shows the time of the event, the user that initiated the event, and the action. The information displayed varies depending on the type of event. + Click the btn:[Examine] image:examine.png[View Event Details,15,15] icon to display the event log for the change. image:activity-stream-event-log.png[event log] diff --git a/downstream/modules/platform/proc-controller-add-groups-hosts.adoc b/downstream/modules/platform/proc-controller-add-groups-hosts.adoc index e15636dbe3..50ab869589 100644 --- a/downstream/modules/platform/proc-controller-add-groups-hosts.adoc +++ b/downstream/modules/platform/proc-controller-add-groups-hosts.adoc @@ -4,7 +4,7 @@ Groups are only applicable to standard inventories and are not configurable directly through a Smart Inventory. You can associate an existing group through hosts that are used with standard inventories. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#proc-controller-add-groups[Adding groups to inventories] in the _{ControllerUG}_. +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#proc-controller-add-groups[Adding groups to inventories] in _{ControllerUG}_. .Procedure //[ddacosta] Need to verify this is the correct flow. The original content identified menus that don't exist. diff --git a/downstream/modules/platform/proc-controller-add-groups-to-groups.adoc b/downstream/modules/platform/proc-controller-add-groups-to-groups.adoc index 5399ac7e54..d9b73a40a4 100644 --- a/downstream/modules/platform/proc-controller-add-groups-to-groups.adoc +++ b/downstream/modules/platform/proc-controller-add-groups-to-groups.adoc @@ -7,16 +7,16 @@ When you have added a group to a template, the Group *Details* page is displayed .Procedure . Select the *Related Groups* tab. -. Click btn:[Existing group] to add a group that already exists in your configuration or btn:[New group] to create a new group. +. Click btn:[Add existing group] to add a group that already exists in your configuration or btn:[Create group] to create a new group. . If creating a new group, enter the appropriate details into the required and optional fields: * *Name* (required): * Optional: *Description*: Enter a description as appropriate. -* *Variables*: Enter definitions and values to be applied to all hosts in this group. +* Optional: *Variables*: Enter definitions and values to be applied to all hosts in this group. Enter variables using either JSON or YAML syntax. Use the radio button to toggle between the two. -. Click btn:[Save]. -. The *Create Group* window closes and the newly created group is displayed as an entry in the list of groups associated with the group that it was +. Click btn:[Create group]. +. 
The *Create group* window closes and the newly created group is displayed as an entry in the list of groups associated with the group that it was created for. //+ //image:inventories-add-group-subgroup-added.png[Inventories add group subgroup] diff --git a/downstream/modules/platform/proc-controller-add-groups.adoc b/downstream/modules/platform/proc-controller-add-groups.adoc index 55a9150d9e..42c1cdfd9a 100644 --- a/downstream/modules/platform/proc-controller-add-groups.adoc +++ b/downstream/modules/platform/proc-controller-add-groups.adoc @@ -29,7 +29,7 @@ All of these spawned groups can have hosts. . From the navigation panel, select {MenuInfrastructureInventories}. . Select the Inventory name you want to add groups to. . In the Inventory *Details* page, select the *Groups* tab. -. Click btn:[Create group] to open the *Create new group* window. +. Click btn:[Create group]. //+ //image:inventories-add-group-new.png[Inventories_manage_group_add] @@ -37,8 +37,9 @@ All of these spawned groups can have hosts. * *Name*: Required * Optional: *Description*: Enter a description as appropriate. -* *Variables*: Enter definitions and values to be applied to all hosts in this group. +* Optional: *Variables*: Enter definitions and values to be applied to all hosts in this group. Enter variables by using either JSON or YAML syntax. Use the radio button to toggle between the two. -. Click btn:[Save]. -. When you have added a group to a template, the Group *Details* page is displayed. +. Click btn:[Create group]. + +When you have added a group to a template, the Group *Details* page is displayed. diff --git a/downstream/modules/platform/proc-controller-add-hosts.adoc b/downstream/modules/platform/proc-controller-add-hosts.adoc index ba6ab169a0..bd9c3abf20 100644 --- a/downstream/modules/platform/proc-controller-add-hosts.adoc +++ b/downstream/modules/platform/proc-controller-add-hosts.adoc @@ -15,7 +15,7 @@ You can configure hosts for the inventory and for groups and groups within group * *Name* (required): * Optional: *Description*: Enter a description as appropriate. -* *Variables*: Enter definitions and values to be applied to all hosts in this group, as in the following example: +* Optional: *Variables*: Enter definitions and values to be applied to all hosts in this group, as in the following example: + [literal, options="nowrap" subs="+attributes"] ---- @@ -29,7 +29,7 @@ You can configure hosts for the inventory and for groups and groups within group Enter variables by using either JSON or YAML syntax. Use the radio button to toggle between the two. . Click btn:[Create host]. -. The *Create Host* window closes and the newly created host is displayed in the list of hosts associated with the group that it was created for. +. The *Create host* window closes and the newly created host is displayed in the list of hosts associated with the group that it was created for. + //image:inventories-add-group-host-added.png[Inventories add group host] + diff --git a/downstream/modules/platform/proc-controller-add-organization-user.adoc b/downstream/modules/platform/proc-controller-add-organization-user.adoc index 652f53b73d..8807e2b2de 100644 --- a/downstream/modules/platform/proc-controller-add-organization-user.adoc +++ b/downstream/modules/platform/proc-controller-add-organization-user.adoc @@ -1,37 +1,40 @@ [id="proc-controller-add-organization-user"] -= Add a User or Team += Adding a user to an organization -To add a user or team to an organization, the user or team must already exist. 
+You can provide a user with access to an organization by adding them to the organization and managing the roles associated with the user. To add a user to an organization, the user must already exist. For more information, see xref:proc-controller-creating-a-user[Creating a user].
+To add roles for a user, the role must already exist. See xref:proc-gw-create-roles[Creating a role] for more information.
-For more information, see xref:proc-controller-creating-a-user[Creating a User] and xref:proc-controller-creating-a-team[Creating a Team].
+The following tab selections are available when adding users to an organization. When user accounts from the {ControllerName} organization have been migrated to {PlatformNameShort} 2.5 during the upgrade process, the *Automation Execution* tab shows content based on whether the users were added to the organization prior to migration.
-To add existing users or team to the Organization:
+{PlatformNameShort}:: Reflects all users added to the organization at the platform level. From this tab, you can add users as organization members and, optionally, provide specific organization-level roles.
+
+Automation Execution:: Reflects users that were added directly to the {ControllerName} organization prior to an upgrade and migration. From this tab, you can only view existing memberships in {ControllerName} and remove those memberships, but you cannot add new memberships.
+
+New user memberships to an organization must be added at the platform level.
 .Procedure
-. In the *Access tab* of the *Organization* page, click btn:[Add].
-. Select a user or team to add.
-. Click btn:[Next].
-. Select one or more users or teams from the list by clicking the checkbox next to the name to add them as members.
+. From the navigation panel, select {MenuAMOrganizations}.
+. From the *Organizations* list view, select the organization to which you want to add a user.
+. Click the *Users* tab to add users.
+. Select the *{PlatformNameShort}* tab and click btn:[Add users] to add user access to the organization, or select the *Automation Execution* tab to view or remove user access from the organization.
+. Select one or more users from the list by clicking the checkbox next to the name to add them as members.
 . Click btn:[Next].
+. Select the roles you want the selected user to have. Scroll down for a complete list of roles.
 +
-image:organizations-add-users-for-example-organization.png[Add roles]
+include::snippets/snip-gw-roles-note-multiple-components.adoc[]
 +
-In this example, two users have been selected.
-. Select the role you want the selected user or team to have.
-Scroll down for a complete list of roles.
-Different resources have different options available.
-+
-image:organizations-add-users-roles.png[Add user roles]
-. Click btn:[Save] to apply the roles to the selected user or team, and to add them as members.
-The *Add Users* or *Add Teams* window displays the updated roles assigned for each user and team.
+. Click btn:[Next] to review the roles settings.
+. Click btn:[Finish] to apply the roles to the selected users, and to add them as members. The *Add roles* dialog displays the updated roles assigned for each user.
 +
 [NOTE]
 ====
-A user or team with associated roles retains them if they are reassigned to another organization.
+A user with associated roles retains them if they are reassigned to another organization.
 ====
-. To remove roles for a particular user, click the disassociate image:disassociate.png[Disassociate,10,10] icon next to its resource.
-This launches a confirmation dialog, asking you to confirm the disassociation. ++ +. To remove a particular user from the organization, select *Remove user* from the *More actions {MoreActionsIcon}* list next to the user. This launches a confirmation dialog, asking you to confirm the removal. +. To manage roles for users in an organization, click the *{SettingsIcon}* icon next to the user and select *Manage roles*. + diff --git a/downstream/modules/platform/proc-controller-add-source.adoc b/downstream/modules/platform/proc-controller-add-source.adoc index d5f722144a..0cc32a5b10 100644 --- a/downstream/modules/platform/proc-controller-add-source.adoc +++ b/downstream/modules/platform/proc-controller-add-source.adoc @@ -12,7 +12,7 @@ Adding a source to an inventory only applies to standard inventories. . From the navigation panel, select {MenuInfrastructureInventories}. . Select the inventory name you want to add a source to. . In the Inventory *Details* page, select the *Sources* tab. -. Click btn:[Add source]. This opens the *Add new source* window. +. Click btn:[Create source]. + //image:inventories-create-source.png[Inventories create source] @@ -27,10 +27,10 @@ For more information about sources, and supplying the appropriate information, s . When the information for your chosen xref:ref-controller-inventory-sources[Inventory sources] is complete, you can optionally specify other common parameters, such as verbosity, host filters, and variables. . Use the *Verbosity* menu to select the level of output on any inventory source's update jobs. -. Use the *Host Filter* field to specify only matching host names to be imported into {ControllerName}. -. In the *Enabled Variable* field, specify that {ControllerName} retrieves the enabled state from the dictionary of host variables. +. Use the *Host filter* field to specify only matching host names to be imported into {ControllerName}. +. In the *Enabled variable* field, specify that {ControllerName} retrieves the enabled state from the dictionary of host variables. You can specify the enabled variable by using dot notation as 'foo.bar', in which case the lookup searches nested dictionaries, equivalent to: `from_dict.get('foo', {}).get('bar', default)`. -. If you specified a dictionary of host variables in the *Enabled Variable* field, you can provide a value to enable on import. +. If you specified a dictionary of host variables in the *Enabled variable* field, you can give a value to enable on import. For example, for `enabled_var='status.power_state'` and `'enabled_value='powered_on'` in the following host variables, the host is marked `enabled`: + [literal, options="nowrap" subs="+attributes"] @@ -56,23 +56,23 @@ the {ControllerName} inventory. Hosts and groups that were not managed by the inventory source are promoted to the next manually created group, or, if there is no manually created group to promote them into, they are left in the "all" default group for the inventory. + When not checked, local child hosts and groups not found on the external source remain untouched by the inventory update process. -* *Overwrite Variables*: If checked, all variables for child groups and hosts are removed and replaced by those found on the external source. +* *Overwrite variables*: If checked, all variables for child groups and hosts are removed and replaced by those found on the external source. + When not checked, a merge is performed, combining local variables with those found on the external source. 
-* *Update on Launch*: Each time a job runs using this inventory, refresh the inventory from the selected source before executing job tasks. +* *Update on launch*: Each time a job runs using this inventory, refresh the inventory from the selected source before executing job tasks. + To avoid job overflows if jobs are spawned faster than the inventory can synchronize, selecting this enables you to configure a *Cache Timeout* to previous cache inventory synchronizations for a certain number of seconds. + -The *Update on Launch* setting refers to a dependency system for projects and inventory, and does not specifically exclude two jobs from running at the same time. +The *Update on launch* setting refers to a dependency system for projects and inventory, and does not specifically exclude two jobs from running at the same time. + If a cache timeout is specified, then the dependencies for the second job are created, and it uses the project and inventory update that the first job spawned. + Both jobs then wait for that project or inventory update to finish before proceeding. If they are different job templates, they can then both start and run at the same time, if the system has the capacity to do so. -If you intend to use {ControllerName}'s provisioning callback feature with a dynamic inventory source, *Update on Launch* must be set for the inventory +If you intend to use {ControllerName}'s provisioning callback feature with a dynamic inventory source, *Update on launch* must be set for the inventory group. + -If you synchronize an inventory source that uses a project that has *Update On Launch* set, then the project might automatically update (according to +If you synchronize an inventory source that uses a project that has *Update On launch* set, then the project might automatically update (according to cache timeout rules) before the inventory update starts. + You can create a job template that uses an inventory that sources from the same project that the template uses. @@ -101,7 +101,7 @@ The *Notifications* tab is only present when you have saved the newly-created so ==== . If notifications are already set up, use the toggles to enable or disable the notifications to use with your particular source. For more information, see xref:controller-enable-disable-notifications[Enable and Disable Notifications]. -. If you have not set up notifications, see xref:controller-notifications[Notifications] for more information. +. If you have not set up notifications, see xref:controller-notifications[Notifiers] for more information. . Review your entries and selections. . Click btn:[Save]. diff --git a/downstream/modules/platform/proc-controller-add-users-job-templates.adoc b/downstream/modules/platform/proc-controller-add-users-job-templates.adoc index 2315685b6d..b6de4425ce 100644 --- a/downstream/modules/platform/proc-controller-add-users-job-templates.adoc +++ b/downstream/modules/platform/proc-controller-add-users-job-templates.adoc @@ -4,13 +4,25 @@ .Procedure -. From the navigation panel, select {MenuAMCredentials}. +. From the navigation panel, select {MenuAECredentials}. . Select the credential that you want to assign to additional users. -. Click the *Access* tab. +. Click the *User Access* tab. You can see users and teams associated with this credential and their roles. -. Choose a user and click btn:[Add]. If no users exist, add them from the *Users* menu. -For more information, see xref:assembly-controller-users[Users]. -. 
Select *Job Templates* to display the job templates associated with this credential, and which jobs have run recently by using this credential.
-. Choose a job template and click btn:[Add] to assign the credential to additional job templates.
+For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/access_management_and_authentication/gw-managing-access#assembly-controller-users_gw-manage-rbac[Users].
+. Click btn:[Add roles].
+. Select the users that you want to give access to the credential and click btn:[Next].
+. From the *Select roles to apply* page, select the roles you want to add to the user.
+. Click btn:[Next].
+. Review your selections and click btn:[Finish] to add the roles or click btn:[Back] to make changes.
++
+The *Add roles* window displays, stating whether the action was successful.
++
+If the action is not successful, a warning displays.
++
+. Click btn:[Close].
+. The *User Access* page displays the summary information.
+. Select the *Job templates* tab to select a job template to which you want to assign this credential.
+. Choose a job template or select *Create job template* from the *Create template* list to assign the credential to additional job templates.
++
 For more information about creating new job templates, see the xref:controller-job-templates[Job templates] section.
diff --git a/downstream/modules/platform/proc-controller-adding-a-project.adoc b/downstream/modules/platform/proc-controller-adding-a-project.adoc
index ef05947440..90aa528808 100644
--- a/downstream/modules/platform/proc-controller-adding-a-project.adoc
+++ b/downstream/modules/platform/proc-controller-adding-a-project.adoc
@@ -15,12 +15,12 @@ You can create a logical collection of playbooks, called projects in {Controller
 * *Name* (required)
 * Optional: *Description*
 * *Organization* (required): A project must have at least one organization. Select one organization now to create the project. When the project is created you can add additional organizations.
-* Optional: *Execution Environment*: Enter the name of the {ExecEnvShort} or search from a list of existing ones to run this project.
-For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_upgrade_and_migration_guide/upgrading-to-ees[Migrating to automation execution environments] in the _Red Hat Ansible Automation Platform Upgrade and Migration Guide_.
-* *Source Control Type* (required): Select an SCM type associated with this project from the menu.
+* Optional: *Execution environment*: Enter the name of the {ExecEnvShort} or search from a list of existing ones to run this project.
+For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/creating_and_using_execution_environments/index[Creating and using execution environments].
+* *Source control type* (required): Select an SCM type associated with this project from the menu.
 Options in the following sections become available depending on the type chosen.
 For more information, see xref:proc-projects-manage-playbooks-manually[Managing playbooks manually] or xref:ref-projects-manage-playbooks-with-source-control[Managing playbooks using source control].
-* Optional: *Content Signature Validation Credential*: Use this field to enable content verification.
Specify the GPG key to use for validating content signature during project synchronization. If the content has been tampered with, the job will not run. For more information, see xref:assembly-controller-project-signing[Project signing and verification]. diff --git a/downstream/modules/platform/proc-controller-adding-gpg-key.adoc b/downstream/modules/platform/proc-controller-adding-gpg-key.adoc index b221a0cd47..ca09d29c2d 100644 --- a/downstream/modules/platform/proc-controller-adding-gpg-key.adoc +++ b/downstream/modules/platform/proc-controller-adding-gpg-key.adoc @@ -11,7 +11,7 @@ $ gpg --export --armour > my_public_key.asc ---- [arabic] -. From the navigation panel, select {MenuAMCredentials}. +. From the navigation panel, select {MenuAECredentials}. . Click btn:[Create credential]. . Give a meaningful name for the new credential, for example, "Infrastructure team public GPG key". . In the *Credential type* field, select *GPG Public Key*. diff --git a/downstream/modules/platform/proc-controller-adding-new-inventory.adoc b/downstream/modules/platform/proc-controller-adding-new-inventory.adoc index 1655ce0bf3..7499c084d6 100644 --- a/downstream/modules/platform/proc-controller-adding-new-inventory.adoc +++ b/downstream/modules/platform/proc-controller-adding-new-inventory.adoc @@ -21,40 +21,56 @@ The *Inventories* window displays a list of the inventories that are currently a * *Name*: Enter a name appropriate for this inventory. * Optional: *Description*: Enter an arbitrary description as appropriate. * *Organization*: Required. Choose among the available organizations. -//* Only applicable to Smart Inventories: *Smart Host Filter*: Click the image:search.png[Search,15,15] icon to open a separate window to filter hosts for this inventory. -//These options are based on the organization you chose. -//+ -//Filters are similar to tags in that tags are used to filter certain hosts that contain those names. -//Therefore, to populate the *Smart Host Filter* field, specify a tag that contains the hosts you want, not the hosts themselves. -//Enter the tag in the *Search* field and click btn:[Enter]. -//Filters are case-sensitive. -//For more information, see xref:ref-controller-smart-host-filter[Smart host filters]. -* *Instance Groups*: Click the image:search.png[Search,15,15] icon to open a separate window. -Select the instance group or groups for this inventory to run on. -If the list is extensive, use the search to narrow the options. -You can select multiple instance groups and sort them in the order that you want them run. +* Only applicable to Smart Inventories: *Smart host filter*: Populate the hosts for this inventory by using a search filter. ++ +.Example ++ +name__icontains=RedHat. ++ +These options are based on the organization you chose. ++ +Filters are similar to tags in that tags are used to filter certain hosts that contain those names. +Therefore, to populate the *Smart host filter* field, specify a tag that has the hosts you want, not the hosts themselves. ++ +Filters are case-sensitive. +* *Instance groups*: Select the instance group or groups for this inventory to run on. ++ +You can select many instance groups and sort them in the order that you want them run. + //image:select-instance-groups-modal.png[image] * Optional: *Labels*: Supply labels that describe this inventory, so they can be used to group and filter inventories and jobs. * Only applicable to constructed inventories: *Input inventories*: Specify the source inventories to include in this constructed inventory. 
-Click the image:search.png[Search,15,15] icon to select from available inventories.
+//Click the image:search.png[Search,15,15] icon to select from available inventories.
 Empty groups from input inventories are copied into the constructed inventory.
-* Optional:(Only applicable to constructed inventories): *Cached timeout (seconds)*: Set the length of time you want the cache plugin
-data to timeout.
+* Optional: Only applicable to constructed inventories: *Cached timeout (seconds)*: Set the length of time before the cache plugin data times out.
 * Only applicable to constructed inventories: *Verbosity*: Control the level of output that Ansible produces as the playbook executes related to inventory sources associated with constructed inventories.
-Select the verbosity from Normal to various Verbose or Debug settings.
-This only appears in the "details" report view.
-** Verbose logging includes the output of all commands.
-** Debug logging is exceedingly verbose and includes information about SSH operations that can be useful in certain
-support instances. Most users do not need to see debug mode output.
++
+Select the verbosity from:
+
+* *Normal*
+* *Verbose*
+* *More verbose*
+* *Debug*
+* *Connection Debug*
+* *WinRM Debug*
+
+** *Verbose* logging includes the output of all commands.
+** *More verbose* provides more detail than *Verbose*.
+** *Debug* logging is exceedingly verbose and includes information about SSH operations that can be useful in certain support instances. Most users do not need to see debug mode output.
+//Not sure of this
+** *Connection Debug* enables you to run SSH in verbose mode, providing debugging information about the SSH connection progress.
+//Not sure of this.
+** *WinRM Debug* provides verbosity specific to Windows Remote Management.
++
+Click the image:arrow.png[Expand,15,15] icon for information on *How to use the constructed inventory plugin*.
 * Only applicable to constructed inventories: *Limit*: Restricts the number of returned hosts for the inventory source associated with the constructed inventory.
 You can paste a group name into the limit field to only include hosts in that group.
 For more information, see the *Source vars* setting and the example after this procedure.
 * Only applicable to standard inventories: *Options*: Check the *Prevent Instance Group Fallback* option to enable only the instance groups listed in the *Instance Groups* field to execute the job.
 If unchecked, all available instances in the execution pool are used based on the hierarchy described in
-link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#controller-control-job-run[Control where a job runs] in the _{ControllerAG}_.
-Click the image:question_circle.png[Help,15,15] icon for additional information.
+xref:controller-control-job-run[Control where a job runs].
+//Click the image:question_circle.png[Help,15,15] icon for additional information.
+
 //[NOTE]
 //====
@@ -72,5 +88,4 @@ This is particularly useful because you can paste that group name into the limit
 //See Example 1 in xref:ref-controller-smart-host-filter[Smart host filters].
 . Click btn:[Create inventory].
-After saving the new inventory, you can proceed with configuring permissions, groups, hosts, sources, and view completed jobs, if
-applicable to the type of inventory.
+After saving the new inventory, you can proceed with configuring permissions, groups, hosts, sources, and view completed jobs, if applicable to the type of inventory.
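+
+The *Source vars* field described in the preceding procedure passes its content to the constructed inventory plugin. The following is a minimal sketch of such source variables; the `shutdown` group name and the `resolved_state` variable are hypothetical values used only for illustration:
+
+[literal, options="nowrap" subs="+attributes"]
+----
+plugin: constructed
+strict: true
+groups:
+  shutdown: resolved_state == "shutdown"
+compose:
+  resolved_state: state | default("running")
+----
+
+With source variables such as these, pasting `shutdown` into the *Limit* field restricts the constructed inventory to the hosts that the `groups` condition resolves into that group.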
diff --git a/downstream/modules/platform/proc-controller-adding-new-schedule.adoc
index d7841d1c99..90bcaf7989 100644
--- a/downstream/modules/platform/proc-controller-adding-new-schedule.adoc
+++ b/downstream/modules/platform/proc-controller-adding-new-schedule.adoc
@@ -10,9 +10,20 @@ To create a new schedule on the *Schedules* page:
 . From the navigation panel, select {MenuAESchedules}.
 . Click btn:[Create schedule].
 This opens the *Create Schedule* window.
+. Select a *Resource type* to which this schedule applies.
 +
-image::ug-generic-create-schedule.png[Create schedule]
-+
+Select from:
+
+* *Job template*
+** For *Job template*, select a *Job template* from the menu.
+* *Workflow job template*
+** For *Workflow job template*, select a *Workflow job template* from the menu.
+* *Inventory source*
+** For *Inventory source*, select an *Inventory* and an *Inventory source* from the appropriate menu.
+* *Project sync*
+** For *Project sync*, select a *Project* from the menu.
+* *Management job template*
+** For *Management job template*, select a *Management job template* from the menu.

To create a new schedule from a resource page:

@@ -22,7 +33,7 @@ This can be a template, project, or inventory source.
 . Click btn:[Create schedule].
 This opens the *Create Schedule* window.

 .For both procedures
-. Enter the appropriate details into the following fields:
+. For *Job template* and *Project sync*, enter the appropriate details into the following fields:

 * *Schedule name*: Enter the name.
 * Optional: *Description*: Enter a description.
@@ -43,4 +54,4 @@ To ensure your schedules are correctly created, set your schedules in UTC time.
 . Click btn:[Next].
 The *Define rules* page is displayed.

-//Use the *On* or *Off* toggle to stop an active schedule or activate a stopped schedule.
+
diff --git a/downstream/modules/platform/proc-controller-adding-subscription-manually.adoc
index 3b0cdb2532..ea2b2c7d64 100644
--- a/downstream/modules/platform/proc-controller-adding-subscription-manually.adoc
+++ b/downstream/modules/platform/proc-controller-adding-subscription-manually.adoc
@@ -2,7 +2,7 @@

 = Add a subscription manually

-If you are unable to apply or update the subscription information by using the {ControllerName} user interface, you can upload the subscriptions manifest manually in an Ansible playbook.
+If you are unable to apply or update the subscription information by using the {ControllerName} user interface, you can upload the subscriptions manifest manually in an Ansible Playbook.

 Use the license module in the `ansible.controller` collection:

diff --git a/downstream/modules/platform/proc-controller-amazon-ec2.adoc
index bf6eb9ed40..c75bc1602f 100644
--- a/downstream/modules/platform/proc-controller-amazon-ec2.adoc
+++ b/downstream/modules/platform/proc-controller-amazon-ec2.adoc
@@ -8,20 +8,21 @@ Use the following procedure to configure an AWS EC2-sourced inventory.
 //[ddacosta] Rewrote this according to IBM style: Refer to a drop-down list by its label, followed by list.
 . From the navigation panel, select {MenuInfrastructureInventories}.
 . Select the inventory name you want to add a source to and click the *Sources* tab.
-. Click btn:[Add source].
-. In the *Add new source* page, select *Amazon EC2* from the *Source* list.
-. The *Add new source* window expands with additional fields.
+.
Click btn:[Create source].
+. In the *Create source* page, select *Amazon EC2* from the *Source* list.
+. The *Create source* window expands with additional fields.
 Enter the following details:
-* Optional: *Credential*: Choose from an existing AWS credential (for more information, see xref:controller-credentials[Credentials]).
+* Optional: *Credential*: Choose from an existing AWS credential.
+For more information, see xref:controller-credentials[Managing user credentials].
 +
 If {ControllerName} is running on an EC2 instance with an assigned IAM Role, the credential can be omitted, and the security credentials from the instance metadata are used instead.
-For more information about using IAM Roles, see link:http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-%20roles-for-amazon-ec2.html[IAM_Roles_for_Amazon_EC2_documentation_at_Amazon].
+For more information about using IAM Roles, see the link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[IAM roles for Amazon EC2] documentation at Amazon.
 . Optional: You can specify the verbosity, host filter, enabled variables or values, and update options as described in xref:proc-controller-add-source[Adding a source].
-. Use the *Source Variables* field to override variables used by the `aws_ec2` inventory plugin.
+. Use the *Source variables* field to override variables used by the `aws_ec2` inventory plugin.
 Enter variables by using either JSON or YAML syntax.
 Use the radio button to toggle between the two.
 For more information about these variables, see the
diff --git a/downstream/modules/platform/proc-controller-api-browsing-api.adoc
index 4a728a4405..8846677b01 100644
--- a/downstream/modules/platform/proc-controller-api-browsing-api.adoc
+++ b/downstream/modules/platform/proc-controller-api-browsing-api.adoc
@@ -4,7 +4,7 @@

 . Go to the {ControllerName} REST API in a web browser at:
 +
-\https:///api/
+\https:///api/controller/v2
 +
 . Click the **"v2"** link next to **"current versions"** or **"available versions"**.
 {ControllerNameStart} supports version 2 of the API.
diff --git a/downstream/modules/platform/proc-controller-api-session-auth.adoc
index a21dc8a29a..32f4ea38c5 100644
--- a/downstream/modules/platform/proc-controller-api-session-auth.adoc
+++ b/downstream/modules/platform/proc-controller-api-session-auth.adoc
@@ -71,5 +71,5 @@ The default value is `awx_session_id` which you can see later in the `Set-Cookie
 [NOTE]
 ====
 You can change the session expiration time by specifying it in the `SESSION_COOKIE_AGE` parameter.
-For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#controller-work-with-session-limits[Working with session limits].
+//For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#controller-work-with-session-limits[Working with session limits].
==== diff --git a/downstream/modules/platform/proc-controller-apps-create-tokens.adoc b/downstream/modules/platform/proc-controller-apps-create-tokens.adoc index 0af8467f8a..8a2faaf66c 100644 --- a/downstream/modules/platform/proc-controller-apps-create-tokens.adoc +++ b/downstream/modules/platform/proc-controller-apps-create-tokens.adoc @@ -2,49 +2,51 @@ = Adding tokens -You can view a list of users that have tokens to access an application by selecting the *Tokens* tab in the applications *Details* page. +You can view a list of users that have tokens to access an application by selecting the *Tokens* tab in the *OAuth Applications* details page. -Configure authentication tokens for your users. -You can select the application to which the token is associated and the level of access that the token has. - -[IMPORTANT] +[NOTE] ==== -You can only create OAuth 2 Tokens for your user through the API or UI, which means you can only access your own user profile to configure or view your tokens. +You can only create OAuth 2 Tokens for your own user, which means you can only configure or view tokens from your own user profile. ==== +When authentication tokens have been configured, you can select the application to which the token is associated and the level of access that the token has. + + .Procedure . From the navigation panel, select {MenuControllerUsers}. -. Select the user for which you want to configure the OAuth 2 tokens. -. Select the *Tokens* tab on the user's profile. +. Select the username for your user profile to configure OAuth 2 tokens. +. Select the *Tokens* tab. + When no tokens are present, the *Tokens* screen prompts you to add them. . Click btn:[Create token] to open the *Create Token* window. . Enter the following details: - -* *Application*: enter the name of the application with which you want to associate your token. -You can also search for it by clicking the image:search.png[Search,15,15] icon. -This opens a separate window that enables you to choose from the available options. -Use the Search bar to filter by name if the list is extensive. -Leave this field blank if you want to create a Personal Access Token (PAT) that is not linked to any application. -* Optional: *Description*: give a short description for your token. -* *Scope* (required): specify the level of access you want this token to have. - -. Click btn:[Create token], or click btn:[Cancel] to abandon your changes. + -After you save the token, the newly created token for the user is displayed with the token information and when it expires. +Application:: Enter the name of the application with which you want to associate your token. Alternatively, you can search for it by clicking btn:[Browse]. This opens a separate window that enables you to choose from the available options. Select *Name* from the filter list to filter by name if the list is extensive. + -//image:users-token-information-example.png[Token information] - -. To view the application to which the token is associated and the token expiration date, go to the token list view. +[NOTE] +==== +To create a Personal Access Token (PAT) that is not linked to any application, leave the Application field blank. +==== +Description:: (optional) Provide a short description for your token. +Scope:: (required) Specify the level of access you want this token to have. The scope of an OAuth 2 token can be set as one of the following: ++ +* *Write*: Allows requests sent with this token to add, edit and delete resources in the system. 
+* *Read*: Limits actions to read only. Note that the write scope includes read scope.
++
+. Click btn:[Create token], or click btn:[Cancel] to abandon your changes.
+
-//image:users-token-assignment-example.png[Token assignment]
+The token information is displayed, including the *Token* and *Refresh Token* values and the expiration date of the token. This is the only time the token and refresh token are shown. You can view the token association and token information from the list view.
++
+. Click the copy icon and save the token and refresh token for future use.

 .Verification
-To verify that the application now shows the user with the appropriate token, open the *Tokens* tab of the Applications window.
+You can verify that the application now shows the user with the appropriate token by using the *Tokens* tab on the Applications details page.

-//image:apps-tokens-list-view-example2.png[image]
+. From the navigation panel, select {MenuAMAdminOauthApps}.
+. Select the application you want to verify from the *Applications* list view.
+. Select the *Tokens* tab.
++
+Your token should be displayed in the list of tokens associated with the application you chose.

 .Additional resources
-
-If you are a system administrator and have to create or remove tokens for other users, see the revoke and create commands in the
-link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/assembly-controller-awx-manage-utility#ref-controller-token-session-management[Token and session management] section of the _{ControllerAG}_.
+If you are a system administrator and have to create or remove tokens for other users, see the revoke and create commands in xref:ref-controller-token-session-management[Token and session management].
diff --git a/downstream/modules/platform/proc-controller-associate-instances-to-instance-group.adoc
index f5063657ae..0e061e9461 100644
--- a/downstream/modules/platform/proc-controller-associate-instances-to-instance-group.adoc
+++ b/downstream/modules/platform/proc-controller-associate-instances-to-instance-group.adoc
@@ -5,7 +5,7 @@

 .Procedure
 . Select the *Instances* tab on the *Details* page of an Instance Group.
-. Click btn:[Associate].
+. Click btn:[Associate instance].
 . Click the checkbox next to one or more available instances from the list to select the instances you want to associate with the instance group and click btn:[Confirm]
//+
//image::instance-group-assoc-instances.png[Associate instances]
diff --git a/downstream/modules/platform/proc-controller-azure-resource-manager.adoc
index 1d3db67ec8..0ce1bcb912 100644
--- a/downstream/modules/platform/proc-controller-azure-resource-manager.adoc
+++ b/downstream/modules/platform/proc-controller-azure-resource-manager.adoc
@@ -8,14 +8,14 @@ Use the following procedure to configure an {Azure} Resource Manager-sourced inv
//[ddacosta] Rewrote this according to style for drop-down lists; see Usage and highlighting for interface elements in the IBM Style Guide
 . From the navigation panel, select {MenuInfrastructureInventories}.
 . Select the inventory name you want to add a source to and click the *Sources* tab.
-. Click btn:[Add source].
-. In the *Add new source* page, select *Microsoft Azure Resource Manager* from the *Source* list.
-. The *Add new source* window expands with the required *Credential* field.
-Choose from an existing Azure Credential. -For more information, see xref:controller-credentials[Credentials]. +. Click btn:[Create source]. +. In the *Create source* page, select *Microsoft Azure Resource Manager* from the *Source* list. +. Enter the following details in the additional fields: +. Optional: *Credential*: Choose from an existing Azure Credential. +For more information, see xref:controller-credentials[Managing user credentials]. . Optional: You can specify the verbosity, host filter, enabled variables or values, and update options as described in xref:proc-controller-add-source[Adding a source]. -. Use the *Source Variables* field to override variables used by the `azure_rm` inventory plugin. +. Use the *Source variables* field to override variables used by the `azure_rm` inventory plugin. Enter variables by using either JSON or YAML syntax. Use the radio button to toggle between the two. For more information about these variables, see the diff --git a/downstream/modules/platform/proc-controller-build-workflow.adoc b/downstream/modules/platform/proc-controller-build-workflow.adoc index 5ca022f9e1..fbd4a91237 100644 --- a/downstream/modules/platform/proc-controller-build-workflow.adoc +++ b/downstream/modules/platform/proc-controller-build-workflow.adoc @@ -15,17 +15,16 @@ Each node is represented by a rectangle while the relationships and their associ . To launch the workflow visualizer, use one of these methods: ** From the navigation panel, select {MenuAETemplates}. -... Select a workflow template, in the *Details* tab click btn:[Edit template]. -... Select the *Visualizer* tab. -** From the *Templates* list view, click the image:visualizer.png[Visualizer,15,15] icon. +... Select a workflow template and click btn:[View workflow visualizer]. +** From the *Templates* list view, click the image:visualizer.png[Visualizer,15,15] icon next to a workflow job template. + //image::ug-wf-editor-create.png[Launch visualizer] + -. Click btn:[Start] to display a list of nodes to add to your workflow. +. Click btn:[Add step] to display a list of nodes to add to your workflow. + //image::ug-wf-add-template-nodes.png[Add Node workflow job template] + -. From the *Node Type* list, select the type of node that you want to add. +. From the *Node type* list, select the type of node that you want to add. + //image::ug-wf-add-node-selections.png[Node type] + @@ -43,9 +42,9 @@ Though a credential is not required in a job template, you cannot select a job t This action is also referred to as edge type. . If the node is a root node, the edge type defaults to *Always* and is non-editable. For subsequent nodes, you can select one of the following scenarios (edge type) to apply to each: -* *Always*: Continue to execute regardless of success or failure. -* *On Success*: After successful completion, execute the next template. -* *On Failure*: After failure, execute a different template. +* *Always run*: Continue to execute regardless of success or failure. +* *Run on success*: After successful completion, execute the next template. +* *Run on fail*: After failure, execute a different template. . Select the behavior of the node if it is a convergent node from the *Convergence* field: * *Any* is the default behavior, allowing any of the nodes to complete as specified, before triggering the next converging node. If the status of one parent meets one of those run conditions, an *any* child node will run. 
@@ -72,7 +71,7 @@ Use the wizard to change the values in each of the tabs and click btn:[Confirm] If a workflow template used in the workflow has *Prompt on launch* selected for the inventory option, use the wizard to supply the inventory at the prompt. If the parent workflow has its own inventory, it overrides any inventory that is supplied here. + -image::ug-wf-prompt-button-inventory-wizard.png[Prompt button inventory] +//image::ug-wf-prompt-button-inventory-wizard.png[Prompt button inventory] + [NOTE] ==== @@ -93,17 +92,17 @@ Otherwise, any changes you make revert back to the values set in the job templat + When the node is created, it is labeled with its job type. A template that is associated with each workflow node runs based on the selected run scenario as it proceeds. -Click the compass (image:compass.png[Compass, 15,15]) icon to display the legend for each run scenario and their job types. +Click btn:[Legend] to display the legend for each run scenario and their job types. + -image::ug-wf-dropdown-list.png[Worfklow dropdown list] +image::ug-wf-dropdown-list.png[Workflow dropdown list] + -. Hover over a node to add another node, view info about the node, edit the node details, edit an existing link, or delete the selected node: +. Hover over a node to edit the node, add step and link, or delete the selected node: + image::ug-wf-add-template.png[Node options] + -. When you have added or edited a node, click btn:[SELECT] to save any modifications and render it on the graphical view. +. When you have added or edited a node, click btn:[Finish] to save any modifications and render it on the graphical view. For possible ways to build your workflow, see xref:controller-building-nodes-scenarios[Building nodes scenarios]. -. When you have built your workflow job template, click btn:[Create workflow job template] to save your entire workflow template and return to the new workflow job template details page. +. When you have built your workflow job template, click btn:[Save] to save your entire workflow template and return to the new workflow job template details page. [IMPORTANT] ==== diff --git a/downstream/modules/platform/proc-controller-building-nodes-scenarios.adoc b/downstream/modules/platform/proc-controller-building-nodes-scenarios.adoc index 873df66f6b..18fe3d6b13 100644 --- a/downstream/modules/platform/proc-controller-building-nodes-scenarios.adoc +++ b/downstream/modules/platform/proc-controller-building-nodes-scenarios.adoc @@ -6,45 +6,45 @@ Learn how to manage nodes in the following scenarios. .Procedure -* Click the (image:plus_icon_dark.png[Plus icon,15,15]) icon on the parent node to add a sibling node: - +* Click the (image:options_menu.png[Plus icon,15,15]) icon on the parent node and *Add step and link* to add a sibling node: ++ image::ug-wf-create-sibling-node.png[Create sibling node] - -* Hover over the line that connects two nodes and click the plus (image:plus_icon_dark.png[Plus icon,15,15]), to insert another node in between nodes. -Clicking the plus (image:plus_icon_dark.png[Plus icon,15,15]) icon automatically inserts the node between the two nodes: - -image::ug-wf-editor-insert-node-template.png[Insert node template] - -* Click btn:[START] again, to add a root node to depict a split scenario: - -image::ug-wf-create-new-add-template-split.png[Node split scenario] - -* At any node where you want to create a split scenario, hover over the node from which the split scenario begins and click the plus (image:plus_icon_dark.png[Plus icon,15,15]) icon. 
-This adds multiple nodes from the same parent node, creating sibling nodes: - -image::ug-wf-create-siblings.png[Node create siblings] - -[NOTE] -==== -When adding a new node, the btn:[PROMPT] option also applies to workflow templates. -Workflow templates prompt for inventory and surveys. -==== - -* You can undo the last inserted node by using one of these methods: -** Click on another node without making a selection. -** Click btn:[Cancel]. - -The following example workflow contains all three types of jobs initiated by a job template. -If it fails to run, you must protect the sync job. -Regardless of whether it fails or succeeds, proceed to the inventory sync job: - -image::ug-wf-add-template-example.png[Workflow template example] - -Refer to the key by clicking the compass (image:compass.png[Compass, 15,15]) icon to identify the meaning of the symbols and colors associated with the graphical depiction. ++ +//. Hover over the line that connects two nodes and click the plus (image:plus_icon_dark.png[Plus icon,15,15]), to insert another node in between nodes. +//Clicking the plus (image:plus_icon_dark.png[Plus icon,15,15]) icon automatically inserts the node between the two nodes: ++ +//image::ug-wf-editor-insert-node-template.png[Insert node template] ++ +* Click btn:[Add step] or btn:[Start] (image:options_menu.png[Plus icon,15,15]) and *Add step*, to add a root node to depict a split scenario. ++ +//image::ug-wf-create-new-add-template-split.png[Node split scenario] ++ +* At any node where you want to create a split scenario, hover over the node from which the split scenario begins and click the plus (image:options_menu.png[Plus icon,15,15]) icon on the parent node and *Add step and link*. +This adds multiple nodes from the same parent node, creating sibling nodes. ++ +//image::ug-wf-create-siblings.png[Node create siblings] ++ +//[NOTE] +//==== +//When adding a new node, the btn:[PROMPT] option also applies to workflow templates. +//Workflow templates prompt for inventory and surveys. +//==== + +//* You can undo the last inserted node by using one of these methods: +//** Click on another node without making a selection. +//** Click btn:[Cancel]. + +//The following example workflow contains all three types of jobs initiated by a job template. +//If it fails to run, you must protect the sync job. +//Regardless of whether it fails or succeeds, proceed to the inventory sync job: + +//image::ug-wf-add-template-example.png[Workflow template example] + +Refer to the key by clicking btn:[Legend] to identify the meaning of the symbols and colors associated with the graphical depiction. 
[NOTE]
====
If you remove a node that has a follow-on node attached to it in a workflow with a set of sibling nodes that has varying edge types, the attached node automatically joins the set of sibling nodes and retains its edge type.

-image::ug-wf-node-delete-scenario.png[Node delete scenario]
+//image::ug-wf-node-delete-scenario.png[Node delete scenario]
====
diff --git a/downstream/modules/platform/proc-controller-cluster-deprovision-instances.adoc
index 18b6603bf4..aec8f0b50b 100644
--- a/downstream/modules/platform/proc-controller-cluster-deprovision-instances.adoc
+++ b/downstream/modules/platform/proc-controller-cluster-deprovision-instances.adoc
@@ -22,4 +22,4 @@ $ awx-manage deprovision_instance --hostname=hostB
----

Deprovisioning instances in {ControllerName} does not automatically deprovision or remove instance groups.
-For more information, see the xref:controller-deprovision-instance-group[Deprovisioning instance groups] section.
+For more information, see the link:{URLControllerUserGuide}/controller-instance-and-container-groups#controller-deprovision-instance-group[Deprovisioning instance groups] section in _{ControllerUG}_.
diff --git a/downstream/modules/platform/proc-controller-configure-analytics.adoc
new file mode 100644
index 0000000000..90d5be03f0
--- /dev/null
+++ b/downstream/modules/platform/proc-controller-configure-analytics.adoc
@@ -0,0 +1,30 @@
+[id="proc-controller-configure-analytics"]
+
+= Configuring {Analytics}
+
+When you imported your license for the first time, you were automatically opted in for the collection of data that powers {Analytics}, a cloud service that is part of the {PlatformNameShort} subscription.
+
+.Procedure
+. From the navigation panel, select {MenuSetSubscription}.
+The *Subscription* page is displayed.
+. If you have not already set up a subscription, do so now, and ensure that on the next page you have selected *{Analytics}* to use analytics data to enhance future releases of {PlatformNameShort} and to provide the Red Hat Insights service to subscribers.
++
+image::automation_analytics.png[Automation analytics page]
+
+. From the navigation panel, select {MenuSetSystem}.
+. Click btn:[Edit].
+. Toggle the *Gather data for {Analytics}* switch and enter your Red Hat customer credentials.
+. You can also configure the following options:
++
+* *Red Hat Customer Name*: This username is used to send data to {Analytics}.
+* *Red Hat Customer Password*: This password is used to send data to {Analytics}.
+* *Red Hat or Satellite Username*: This username is used to send data to {Analytics}.
+* *Red Hat or Satellite password*: This password is used to send data to {Analytics}.
+* *Last gather date for Automation Analytics*: Set the date and time.
+* *Automation Analytics Gather Interval*: Interval (in seconds) between data gathering.
++
+. Click btn:[Save].
+//This field has been removed.
+//* *Last gathered entries from the data collection service of {Analytics}*: TBD
+
+
diff --git a/downstream/modules/platform/proc-controller-configure-jobs.adoc
index 35259aa1e1..6a239484b3 100644
--- a/downstream/modules/platform/proc-controller-configure-jobs.adoc
+++ b/downstream/modules/platform/proc-controller-configure-jobs.adoc
@@ -2,17 +2,98 @@

 = Configuring jobs

-The *Jobs* tab enables you to configure the types of modules that can be used by the {ControllerName}'s Ad Hoc Commands feature, set limits on the number of jobs that can be scheduled, define their output size, and other details pertaining to working with jobs in {ControllerName}.
+You can use the *Job* option to define the operation of jobs in {ControllerName}.

 .Procedure
-. From the navigation panel, select {MenuAEAdminSettings}.
-. Select *Jobs settings* in the *Jobs* option.
-. Click btn:[Edit].
-. Set the configurable options from the fields provided.
+. From the navigation panel, select {MenuSetJob}.
+. On the *Job Settings* page, click btn:[Edit].
++
+image::job-settings-full.png[Jobs settings options]
++
+. You can configure the following options:

+* *Ansible Modules Allowed For Ad Hoc Jobs*: List of modules allowed to be used by ad hoc jobs.
+* *When can extra variables contain Jinja templates?*: Ansible allows variable substitution through the Jinja2 templating language for `--extra-vars`.
++
+This poses a potential security risk where users with the ability to specify extra vars at job launch time can use Jinja2 templates to run arbitrary Python.
++
+Set this value to either `template` or `never`.
++
+* *Paths to expose to isolated jobs*: List of paths that would otherwise be hidden to expose to isolated jobs.
++
+Enter one path per line.
++
+Volumes are mounted from the execution node to the container.
++
+The supported format is `HOST-DIR[:CONTAINER-DIR[:OPTIONS]]`, as shown in the example after this list.
++
+* *Extra Environment Variables*: Additional environment variables set for playbook runs, inventory updates, project updates, and notification sending.
+* *K8S Ansible Runner Keep-Alive Message Interval*: Only applies to jobs running in a Container Group.
++
+If not 0, send a message every specified number of seconds to keep the connection open.
++
+* *Environment Variables for Galaxy Commands*: Additional environment variables set for invocations of ansible-galaxy within project updates.
+Useful if you must use a proxy server for ansible-galaxy but not git.
+* *Standard Output Maximum Display Size*: Maximum Size of Standard Output in bytes to display before requiring the output be downloaded.
+* *Job Event Standard Output Maximum Display Size*: Maximum Size of Standard Output in bytes to display for a single job or ad hoc command event. stdout ends with `…` when truncated.
+* *Job Event Maximum Websocket Messages Per Second*: The maximum number of messages to update the UI live job output with per second.
++
+A value of 0 means no limit.
+* *Maximum Scheduled Jobs*: Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created.
+* *Ansible Callback Plugins*: List of paths to search for extra callback plugins to be used when running jobs.
++
+Enter one path per line.
+* *Default Job Timeout*: Maximum time in seconds to allow jobs to run.
++
+Use a value of 0 to indicate that no timeout should be imposed.
+* *Default Job Idle Timeout*: If no output is detected from ansible in this number of seconds the execution will be terminated.
++
+Use a value of 0 to indicate that no idle timeout should be imposed.
+* *Default Inventory Update Timeout*: Maximum time in seconds to allow inventory updates to run.
++
+Use a value of 0 to indicate that no timeout should be imposed.
++
+A timeout set on an individual inventory source will override this.
+* *Default Project Update Timeout*: Maximum time in seconds to allow project updates to run.
++
+Use a value of 0 to indicate that no timeout should be imposed.
++
+A timeout set on an individual project will override this.
+* *Per-Host Ansible Fact Cache Timeout*: Maximum time, in seconds, that stored Ansible facts are considered valid since the last time they were modified.
++
+Only valid, non-stale, facts are accessible by a playbook.
++
+This does not influence the deletion of `ansible_facts` from the database.
++
+Use a value of 0 to indicate that no timeout should be imposed.
+* *Maximum number of forks per job*: Saving a Job Template with more than this number of forks results in an error.
++
+When set to 0, no limit is applied.
+* *Job execution path*: The directory in which the service creates new temporary directories for job execution and isolation (such as credential files). Only available in operator-based installations.
+* *Container Run Options*: Only available in operator-based installations.
++
+List of options to pass to Podman run, for example: `['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug']`.
++
+You can set the following options:
++
+* *Run Project Updates With Higher Verbosity*: Select to add the CLI `-vvv` flag to playbook runs of `project_update.yml` used for project updates.
+* *Enable Role Download*: Select to allow roles to be dynamically downloaded from a `requirements.yml` file for SCM projects.
+* *Enable Collection(s) Download*: Select to allow collections to be dynamically downloaded from a `requirements.yml` file for SCM projects.
+* *Follow symlinks*: Select to follow symbolic links when scanning for playbooks.
++
+Be aware that setting this to `True` can lead to infinite recursion if a link points to a parent directory of itself.
+* *Expose host paths for Container Groups*: Select to expose paths through hostPath for the Pods created by a Container Group.
++
+HostPath volumes present many security risks, and it is best practice to avoid the use of HostPaths when possible.
++
+* *Ignore Ansible Galaxy SSL Certificate Verification*: If set to `true`, certificate validation is not done when installing content from any Galaxy server.
++
 Click the tooltip image:question_circle.png[Tool tip,15,15] icon next to the field that you need additional information about.
 +
-For more information about configuring Galaxy settings, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#ref-projects-galaxy-support[Ansible Galaxy Support] section of the _{ControllerUG}_.
+For more information about configuring Galaxy settings, see the link:{URLControllerUserGuide}/controller-projects#ref-projects-galaxy-support[Ansible Galaxy Support] section of _{ControllerUG}_.
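+
+For example, hypothetical *Paths to expose to isolated jobs* entries in the `HOST-DIR[:CONTAINER-DIR[:OPTIONS]]` format described earlier in this list, entered one path per line, might look like the following; the second path is invented for the example:
+
+[literal, options="nowrap" subs="+attributes"]
+----
+/etc/pki/ca-trust:/etc/pki/ca-trust:O
+/opt/inventory-data:/opt/inventory-data:ro
+----
+
+Here the first entry exposes the host CA trust store to job containers with the overlay (`O`) option, and the second mounts a directory read-only (`ro`).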
+
 [NOTE]
 ====
diff --git a/downstream/modules/platform/proc-controller-configure-secret-lookups.adoc
index 3e3f215c65..d666ac3560 100644
--- a/downstream/modules/platform/proc-controller-configure-secret-lookups.adoc
+++ b/downstream/modules/platform/proc-controller-configure-secret-lookups.adoc
@@ -52,7 +52,7 @@ You return to the *Details* screen of your target credential.
 . Repeat these steps, starting with Step 3 to complete the remaining input fields for the target credential.
 By linking the information in this manner, {ControllerName} retrieves sensitive information, such as username, password, keys, certificates, and tokens from the third-party management systems and populates the remaining fields of the target credential form with that data.
 . If necessary, supply any information manually for those fields that do not use linking as a way of retrieving sensitive information.
-For more information about each of the fields, see the appropriate xref:ref-controller-credential-types[Credential Types].
+For more information about each of the fields, see the appropriate credential type.
 . Click btn:[Save].

 .Additional resources
diff --git a/downstream/modules/platform/proc-controller-configure-subscriptions.adoc
new file mode 100644
index 0000000000..6e35ec2b47
--- /dev/null
+++ b/downstream/modules/platform/proc-controller-configure-subscriptions.adoc
@@ -0,0 +1,17 @@
+[id="proc-controller-configure-subscriptions"]
+
+= Configuring subscriptions
+
+You can use the *Subscription* menu to view the details of your subscription, such as compliance, host-related statistics, or expiry, or you can apply a new subscription.
+
+.Procedure
+. From the navigation panel, select {MenuSetSubscription}. The *Subscription* page is displayed.
+//[ddacosta] - Removing images but they can be added back if requested.
+//image::settings_subscription_page.png[Initial subscriptions page]
+. Click btn:[Edit subscription].
+. You can either enter your Red Hat username and password, or attach a current subscription manifest, on the *Welcome* page.
+//[ddacosta] - Removing images but they can be added back if requested.
+//image::subscriptions_first-page.png[Subscriptions page for password or manifest]
+. Click btn:[Next] and agree to the terms of the license agreement.
+. Click btn:[Next] to review the subscription settings.
+. Click btn:[Finish] to complete the configuration.
diff --git a/downstream/modules/platform/proc-controller-configure-system.adoc
index 1d61859966..efb9e19710 100644
--- a/downstream/modules/platform/proc-controller-configure-system.adoc
+++ b/downstream/modules/platform/proc-controller-configure-system.adoc
@@ -2,32 +2,57 @@

 = Configuring system settings

-The *System* tab enables you to complete the following actions:
-
-* Define the base URL for the {ControllerName} host
-//* Configure alerts
-* Enable activity capturing
-* Control visibility of users
-* Set {ControllerName} analytics settings
-//* Enable certain {ControllerName} features and functionality through a license file
-//* Configure logging aggregation options
+You can use the *System* menu to define {ControllerName} system settings.

 .Procedure
 . From the navigation panel, select {MenuSetSystem}.
-. Click btn:[Edit].
-//.
Choose from the following *System* options: -//* *Miscellaneous System settings*: Enable activity streams, specify the default {ExecEnvShort}, define the base URL for the {ControllerName} host, enable {ControllerName} administration alerts, set user visibility, define analytics, specify usernames and passwords, and configure proxies. -//* *Miscellaneous Authentication settings*: Configure options associated with authentication methods (built-in or SSO), sessions (timeout, number of sessions logged in, tokens), and social authentication mapping. -//* *Logging settings*: Configure logging options based on the type you choose: -//+ -//image::ag-configure-aap-system-logging-types.png[Logging settings] -//+ -//For more information about each of the logging aggregation types, see the xref:assembly-controller-logging-aggregation[Logging and Aggregation] section. -. Set the configurable options from the fields provided. -Click the tooltip image:question_circle.png[Tool tip,15,15] icon next to the field that you need additional information about. +The *System Settings* page is displayed. //+ -//The following is an example of the *Miscellaneous System* settings: -//+ -//image::ag-configure-aap-system.png[Misc. system settings] -. Click btn:[Save] to apply the settings. +//image::system-settings-page.png[System settings page - unedited] +. Click btn:[Edit]. +//+ +//image::system-settings-full.png[System settings - configurable fields] +. You can configure the following options: ++ +* *Base URL of the service*: This setting is used by services such as notifications to render a valid URL to the service. +* *Proxy IP allowed list*: If the service is behind a reverse proxy or load balancer, use this setting to configure the proxy IP addresses from which the service should trust custom `REMOTE_HOST_HEADERS` header values. ++ +If this setting is an empty list (the default), the headers specified by `REMOTE_HOST_HEADERS` are trusted unconditionally. +* *CSRF Trusted Origins List*: If the service is behind a reverse proxy or load balancer, use this setting to configure the `schema://addresses` from which the service should trust Origin header values. +* *Red Hat customer username*: This username is used to send data to Automation Analytics. +* *Red Hat customer password*: This password is used to send data to Automation Analytics. +* *Red Hat or Satellite username*: This username is used to send data to Automation Analytics. +* *Red Hat or Satellite password*: This password is used to send data to Automation Analytics. +* *Global default {ExecEnvShort}*: The {ExecEnvShort} to be used when one has not been configured for a job template. +* *Custom virtual environment paths*: Paths where {ControllerName} looks for custom virtual environments. ++ +Enter one path per line. ++ +* *Last gather date for Automation Analytics*: Set the date and time. +//This field has been removed by https://github.com/ansible/awx/pull/15497 +//* *Last gathered entries from the data collection service of {Analytics}*: Do not enter anything in this field. +* *{Analytics} Gather Interval*: Interval (in seconds) between data gathering. ++ +If *Gather data for {Analytics}* is set to false, this value is ignored. ++ +* *Last cleanup date for HostMetrics*: Set the date and time. +* *Last computing date of HostMetricSummaryMonthly*: Set the date and time. +* *Remote Host Headers*: HTTP headers and meta keys to search to decide remote hostname or IP. 
+Add additional items to this list, such as `HTTP_X_FORWARDED_FOR`, if behind a reverse proxy. +For more information, see link:{URLAAPOperationsGuide}/assembly-configuring-proxy-support[Configuring proxy support for {PlatformName}]. +* *Automation Analytics upload URL*: This value has been set manually in a settings file. +This setting is used to configure the upload URL for data collection for Automation Analytics. +* *Defines subscription usage model and shows Host Metrics*: ++ +You can select the following options: ++ +* *Enable Activity Stream*: Set to enable capturing activity for the activity stream. +* *Enable Activity Stream for Inventory Sync*: Set to enable capturing activity for the activity stream when running inventory sync. +* *All Users Visible to Organization Admins*: Set to control whether any organization administrator can view all users and teams, even those not associated with their organization. +* *Organization Admins Can Manage Users and Teams*: Set to control whether any organization administrator has the privileges to create and manage users and teams. ++ +You might want to disable this ability if you are using an LDAP or SAML integration. +* *Gather data for Automation Analytics*: Set to enable the service to gather data on automation and send it to {Analytics}. + +. Click btn:[Save] diff --git a/downstream/modules/platform/proc-controller-configure-transparent-SAML.adoc b/downstream/modules/platform/proc-controller-configure-transparent-SAML.adoc index d8a91abaea..caf9abf78e 100644 --- a/downstream/modules/platform/proc-controller-configure-transparent-SAML.adoc +++ b/downstream/modules/platform/proc-controller-configure-transparent-SAML.adoc @@ -6,17 +6,4 @@ For transparent logins to work, you must first get IdP-initiated logins to work. .Procedure -. Set the `RelayState` on the IdP to the key of the IdP definition in the *SAML Enabled Identity Providers* field. -. When this is working, specify the redirect URL for non-logged-in users to somewhere other than the default {ControllerName} login page by using the *Login redirect override URL* field in the *Miscellaneous Authentication* settings window of the {MenuAEAdminSettings} menu. -You must set this to `/sso/login/saml/?idp=` for transparent SAML login, as shown in the following example: -+ -image::ag-configure-system-login-redirect-url.png[Configure SAML login] -+ -[NOTE] -==== -This example shows a typical IdP format, but might not be the correct format for your particular case. -You might need to reach out to your IdP for the correct transparent redirect URL as that URL is not the same for all IdPs. -==== -+ -. After you configure transparent SAML login, to log in using local credentials or a different SSO, go directly to `https:///login`. -This provides the standard {ControllerName} login page, including SSO authentication options, enabling you to log in with any configured method. +* Set the `RelayState` on the IdP to "IdP". diff --git a/downstream/modules/platform/proc-controller-copy-a-job-template.adoc b/downstream/modules/platform/proc-controller-copy-a-job-template.adoc index b3c9117ce9..5b4ba0edf5 100644 --- a/downstream/modules/platform/proc-controller-copy-a-job-template.adoc +++ b/downstream/modules/platform/proc-controller-copy-a-job-template.adoc @@ -4,7 +4,7 @@ If you copy a job template, it does not copy any associated schedule, notifications, or permissions. Schedules and notifications must be recreated by the user or administrator creating the copy of the job template. 
-The user copying the Job Template is be granted administrator permission, but no permissions are assigned (copied) to the job template. +The user copying the Job Template is granted administrator permission, but no permissions are assigned (copied) to the job template. .Procedure @@ -12,5 +12,5 @@ The user copying the Job Template is be granted administrator permission, but no . Click image:options_menu.png[options menu,15,15] and the copy image:copy.png[Copy,15,15] icon associated with the template that you want to copy. * The new template with the name of the template from which you copied and a timestamp displays in the list of templates. . Click to open the new template and click btn:[Edit template]. -. Replace the contents of the *Name* field with a new name, and provide or modify the entries in the other fields to complete this page. +. Replace the contents of the *Name* field with a new name, and give or change the entries in the other fields to complete this page. . Click btn:[Save job template]. diff --git a/downstream/modules/platform/proc-controller-create-application.adoc b/downstream/modules/platform/proc-controller-create-application.adoc index 7bed63c376..5a177e9cff 100644 --- a/downstream/modules/platform/proc-controller-create-application.adoc +++ b/downstream/modules/platform/proc-controller-create-application.adoc @@ -2,7 +2,8 @@ = Creating a new application -When integrating an external web application with {ControllerName} the web application might need to create OAuth2 Tokens on behalf of users of the web application. +When integrating an external web application with {PlatformNameShort}, the web application might need to create OAuth2 tokens on behalf of users of the web application. + Creating an application with the Authorization Code grant type is the preferred way to do this for the following reasons: * External applications can obtain a token for users, using their credentials. @@ -11,25 +12,32 @@ For example, revoking all tokens associated with that application. .Procedure . From the navigation panel, select {MenuAMAdminOauthApps}. -. Click btn:[Create application]. +. Click btn:[Create OAuth application]. The *Create Application* page opens. + //image:apps-create-new.png[Create application] . Enter the following details: - -* *Name* (required): give a name for the application you want to create -* Optional: *Description*: give a short description for your application -* *Organization* (required): give an organization with which this application is associated -* *Authorization grant type* (required): select one of the grant types to use for the user to get tokens for this application. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#ref-controller-password-grant-type[Application using password grant type] section of the _{ControllerAG}_. -* *Client Type* (required): select the level of security of the client device. -* *Redirect URIS*: give a list of allowed URIs, separated by spaces. ++ +Name:: (required) Enter a name for the application you want to create. +Description:: (optional) Include a short description for your application. +Organization:: (required) Select an organization with which this application is associated. +Authorization grant type:: (required) Select one of the grant types to use for the user to get tokens for this application. 
+For more information about grant types, see xref:ref-gw-application-functions[Application functions].
+Client Type:: (required) Select the level of security of the client device.
+Redirect URIs:: Provide a list of allowed URIs, separated by spaces.
 You need this if you specified the grant type to be *Authorization code*.
-
-. Click btn:[Create application], or click btn:[Cancel] to abandon your changes.
 +
-The client ID displays in a window.
+. Click btn:[Create OAuth application], or click btn:[Cancel] to abandon your changes.
++
+The *Client ID* and *Client Secret* display in a window. This is the only time the client secret is shown.
++
+[NOTE]
+====
+The *Client Secret* is only created when the *Client type* is set to *Confidential*.
+====
++
+. Click the copy icon and save the client ID and client secret to integrate an external application with {PlatformNameShort}.

 //image:apps-client-id-popup.png[Client ID]
diff --git a/downstream/modules/platform/proc-controller-create-container-group.adoc
index da02064c9e..03cd1c8f0b 100644
--- a/downstream/modules/platform/proc-controller-create-container-group.adoc
+++ b/downstream/modules/platform/proc-controller-create-container-group.adoc
@@ -21,7 +21,48 @@

 .Procedure

-. To create a service account, download and use the sample service account, `containergroup sa` and modify it as needed to obtain the credentials.
+. To create a service account, download and use the following sample service account, `containergroup sa`, and change it as required to obtain the credentials:
++
+[literal, options="nowrap" subs="+attributes"]
+----
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: containergroup-service-account
+  namespace: containergroup-namespace
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: role-containergroup-service-account
+  namespace: containergroup-namespace
+rules:
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+- apiGroups: [""]
+  resources: ["pods/log"]
+  verbs: ["get"]
+- apiGroups: [""]
+  resources: ["pods/attach"]
+  verbs: ["get", "list", "watch", "create"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: role-containergroup-service-account-binding
+  namespace: containergroup-namespace
+subjects:
+- kind: ServiceAccount
+  name: containergroup-service-account
+  namespace: containergroup-namespace
+roleRef:
+  kind: Role
+  name: role-containergroup-service-account
+  apiGroup: rbac.authorization.k8s.io
+----
++
 . Apply the configuration from `containergroup-sa.yml`:
 +
 [literal, options="nowrap" subs="+attributes"]
 ----
@@ -50,14 +91,14 @@ oc get secret $(echo ${SA_SECRET}) -o json | jq '.data.token' | xargs | base64 -
 oc get secret $SA_SECRET -o json | jq '.data["ca.crt"]' | xargs | base64 --decode > containergroup-ca.crt
 ----
 +
-. Use the contents of `containergroup-sa.token` and `containergroup-ca.crt` to provide the information for the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#ref-controller-credential-openShift[OpenShift or Kubernetes API Bearer Token] required for the container group.
+.
Use the contents of `containergroup-sa.token` and `containergroup-ca.crt` to provide the information for the xref:ref-controller-credential-openShift[OpenShift or Kubernetes API Bearer Token] required for the container group. -To create a container group, create an link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#ref-controller-credential-openShift[OpenShift or Kubernetes API Bearer Token] credential to use with your container group. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-getting-started-create-credential[Creating a credential] in the _{ControllerUG}_. +To create a container group, create an xref:ref-controller-credential-openShift[OpenShift or Kubernetes API Bearer Token] credential to use with your container group. +For more information, see xref:controller-create-credential[Creating new credentials]. .Procedure . From the navigation panel, select {MenuInfrastructureInstanceGroups}. . Click btn:[Create group] and select *Create container group*. . Enter a name for your new container group and select the credential previously created to associate it to the container group. -. Click btn:[Create Container Group]. +. Click btn:[Create container group]. diff --git a/downstream/modules/platform/proc-controller-create-credential-type.adoc b/downstream/modules/platform/proc-controller-create-credential-type.adoc index c7766dd7eb..09110167d3 100644 --- a/downstream/modules/platform/proc-controller-create-credential-type.adoc +++ b/downstream/modules/platform/proc-controller-create-credential-type.adoc @@ -5,9 +5,10 @@ To create a new credential type: .Procedure -. In the *Credential Types* view, click btn:[Add]. +. From the navigation panel, select {MenuAECredentials}. +. In the *Credential Types* view, click btn:[Create credential type]. + -image:credential-types-create-new.png[Create new credential type] +//image:credential-types-create-new.png[Create new credential type] . Enter the appropriate details in the *Name* and *Description* field. + @@ -16,7 +17,7 @@ image:credential-types-create-new.png[Create new credential type] When creating a new credential type, do not use reserved variable names that start with `ANSIBLE_` for the *INPUT* and *INJECTOR* names and IDs, as they are invalid for custom credential types. ==== -. In the *Input Configuration* field, specify an input schema that defines a set of ordered fields for that type. +. In the *Input configuration* field, specify an input schema that defines a set of ordered fields for that type. The format can be in YAML or JSON: + *YAML* @@ -109,7 +110,7 @@ When `type=string`, fields can optionally specify multiple choice options: }, ---- -. In the *Injector Configuration* field, enter environment variables or extra variables that specify the values a credential type can inject. +. In the *Injector configuration* field, enter environment variables or extra variables that specify the values a credential type can inject. The format can be in YAML or JSON (see examples in the previous step). + The following configuration in JSON format shows each field and how they are used: @@ -187,7 +188,7 @@ The following is an example of referencing many files in a custom credential tem } ---- -. Click btn:[Save]. +. Click btn:[Create credential type]. 
+
Your newly created credential type is displayed on the list of credential types:
+
@@ -209,4 +210,4 @@ image:credential-types-new-listed-verify.png[Verify new credential type]
.Additional resources
-For information about how to create a new credential, see xref:controller-getting-started-create-credential[Creating a credential].
+For information about how to create a new credential, see xref:controller-create-credential[Creating new credentials].
diff --git a/downstream/modules/platform/proc-controller-create-credential.adoc b/downstream/modules/platform/proc-controller-create-credential.adoc
index 9e1f902dbc..7e9d7a5bb9 100644
--- a/downstream/modules/platform/proc-controller-create-credential.adoc
+++ b/downstream/modules/platform/proc-controller-create-credential.adoc
@@ -1,13 +1,13 @@
-[id="controller-getting-started-create-credential"]
+[id="controller-create-credential"]
= Creating new credentials
-ifdef::controller-GS[]
-As part of the initial setup, a demonstration credential and a Galaxy credential have been created for your use. Use the Galaxy credential as a template.
-It can be copied, but not edited.
-You can add more credentials as necessary.
-endif::controller-GS[]
+//ifdef::controller-GS[]
+//As part of the initial setup, a demonstration credential and a Galaxy credential have been created for your use. Use the Galaxy credential as a template.
+//It can be copied, but not edited.
+//You can add more credentials as necessary.
+//endif::controller-GS[]
-ifdef::controller-UG[]
+//ifdef::controller-UG[]
Credentials added to a team are made available to all members of the team.
You can also add credentials to individual users.
@@ -15,42 +15,25 @@ As part of the initial setup, two credentials are available for your use: Demo C
Use the Ansible Galaxy credential as a template.
You can copy this credential, but not edit it.
Add more credentials as needed.
-endif::controller-UG[]
+//endif::controller-UG[]
.Procedure
-. From the navigation panel, select {MenuAMCredentials}.
-ifdef::controller-GS[]
-. To add a new credential, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-getting-started-create-credential[Creating a credential] in the _{ControllerUG}_.
-+
-[NOTE]
-====
-When you set up additional credentials, the user you assign must have root access or be able to use SSH to connect to the host machine.
-====
-+
-. Click btn:[Demo Credential] to view its details.
-
-image::controller-credentials-demo-details.png[Demo Credential]
-endif::controller-GS[]
-ifdef::controller-UG[]
-. Click btn:[Add].
-+
+. From the navigation panel, select {MenuAECredentials}.
+. On the *Credentials* page, click btn:[Create credential].
+//+
//image:credentials-create-credential.png[Credentials-create]
. Enter the following information:
-* The name for your new credential.
-* Optional: a description for the new credential.
-* Optional: The name of the organization with which the credential is associated.
-+
-[NOTE]
-====
-A credential with a set of permissions associated with one organization persists if the credential is reassigned to another
-organization.
-====
-. In the *Credential Type* field, enter or select the credential type you want to create.
-+
-//image:credential-types-drop-down-menu.png[Credential types]
+* *Name*: The name for your new credential.
+* (Optional) *Description*: A description for the new credential.
+* (Optional) *Organization*: The name of the organization with which the credential is associated.
The default is *Default*.
+* *Credential type*: Enter or select the credential type you want to create.
. Enter the appropriate details depending on the type of credential selected, as described in xref:ref-controller-credential-types[Credential types].
-. Click btn:[Save].
++
+image:credential-types-drop-down-menu.png[Credential types drop down list]
+
+. Click btn:[Create credential].
-endif::controller-UG[]
+//You can also use this procedure from the *Credentials* tab when you select a credential type on the *Credential Types* page. Not sure how to document that, it should be a single route.
+//endif::controller-UG[]
diff --git a/downstream/modules/platform/proc-controller-create-custom-notifications.adoc b/downstream/modules/platform/proc-controller-create-custom-notifications.adoc
index 609a1f368d..6811901ddd 100644
--- a/downstream/modules/platform/proc-controller-create-custom-notifications.adoc
+++ b/downstream/modules/platform/proc-controller-create-custom-notifications.adoc
@@ -7,9 +7,9 @@ You can xref:controller-attributes-custom-notifications[customize the text conte
.Procedure
. From the navigation panel, select {MenuAEAdminJobNotifications}.
-. Click btn:[Add notifier].
+. Click btn:[Create notifier].
. Choose a notification type from the *Type* list.
-. Enable *Customize messages* using the toggle.
+. Enable *Customize messages* by using the toggle.
+
image::ug-notification-template-customize.png[Customize notification]
+
@@ -154,4 +154,4 @@ If you save the notifications template without editing the custom message (or ed
* For more information, see link:https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#using-variables-with-jinja2[Using variables with Jinja2] in the Ansible documentation.
* {ControllerNameStart} requires valid syntax to retrieve the correct data to display the messages.
-For a list of supported attributes and the proper syntax construction, see the xref:controller-attributes-custom-notifications[Supported Attributes for Custom Notifications] section.
+For a list of supported attributes and the proper syntax construction, see the xref:controller-attributes-custom-notifications[Supported attributes for custom notifications] section.
diff --git a/downstream/modules/platform/proc-controller-create-host.adoc b/downstream/modules/platform/proc-controller-create-host.adoc
new file mode 100644
index 0000000000..0a3691dae7
--- /dev/null
+++ b/downstream/modules/platform/proc-controller-create-host.adoc
@@ -0,0 +1,18 @@
+[id="proc-controller-create-host"]
+
+= Creating a host
+
+To create a new host, complete the following steps.
+
+.Procedure
+. From the navigation panel, select {MenuInfrastructureHosts}.
+. Click btn:[Create host].
+. On the *Create Host* page, enter the following information:
+
+* *Name*: Enter a name for your host.
+* (Optional) *Description*: Enter a description for your host.
+// [emcwhinn] Inventory is not an option in 2.5 UI.
+//* *Inventory*: Select the inventory from the list to contain your host.
+* *Variables*: Enter the inventory file variables associated with your host. See the example after this procedure.
+
+. Click btn:[Create host] to save your changes.
\ No newline at end of file
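+
+The following is a minimal sketch of what the *Variables* field accepts, in YAML; the address and user shown are placeholder values for illustration, not defaults:
+
+[literal, options="nowrap" subs="+attributes"]
+----
+---
+# Standard Ansible connection variables for this host. Replace the values with your own.
+ansible_host: 192.0.2.10
+ansible_user: admin
+----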
diff --git a/downstream/modules/platform/proc-controller-create-insights-credential.adoc b/downstream/modules/platform/proc-controller-create-insights-credential.adoc
index 2e6a769418..18d5598007 100644
--- a/downstream/modules/platform/proc-controller-create-insights-credential.adoc
+++ b/downstream/modules/platform/proc-controller-create-insights-credential.adoc
@@ -6,14 +6,14 @@ Use the following procedure to create a new credential for use with Red Hat Insi
.Procedure
-. From the navigation panel, select {MenuAMCredentials}.
+. From the navigation panel, select {MenuAECredentials}.
. Click btn:[Create credential].
. Enter the appropriate details in the following fields:
* *Name*: Enter the name of the credential.
* Optional: *Description*: Enter a description for the credential.
* Optional: *Organization*: Enter the name of the organization with which the credential is associated, or click the search image:search.png[Search,15,15] icon and select it from the *Select organization* window.
-* *Credential Type*: Enter *Insights* or select it from the list.
+* *Credential type*: Enter *Insights* or select it from the list.
+
image::ug-credential-types-popup-window-insights.png[Credentials insights pop up]
+
diff --git a/downstream/modules/platform/proc-controller-create-insights-project.adoc b/downstream/modules/platform/proc-controller-create-insights-project.adoc
index 22d5122708..e28bd5df45 100644
--- a/downstream/modules/platform/proc-controller-create-insights-project.adoc
+++ b/downstream/modules/platform/proc-controller-create-insights-project.adoc
@@ -14,10 +14,10 @@ Note that the following fields require specific Red Hat Insights related entries
* *Name*: Enter the name for your Red Hat Insights project.
* Optional: *Description*: Enter a description for the project.
* *Organization*: Enter the name of the organization with which the credential is associated, or click the search image:search.png[Search,15,15] icon and select it from the *Select organization* window.
-* Optional: *Execution Environment*: The {ExecEnvShort} that is used for jobs that use this project.
-* *Source Control Type*: Select *Red Hat Insights*.
-* Optional: *Content Signature Validation Credential*: Enable content signing to verify that the content has remained secure when a project is synced.
-* *Insights Credential*: This is pre-populated with the Red Hat Insights credential you previously created.
+* Optional: *Execution environment*: The {ExecEnvShort} that is used for jobs that use this project.
+* *Source control type*: Select *Red Hat Insights*.
+* Optional: *Content signature validation credential*: Enable content signing to verify that the content has remained secure when a project is synced.
+* *Insights credential*: This is pre-populated with the Red Hat Insights credential that you previously created.
If not, enter the credential, or click the search image:search.png[Search,15,15] icon and select it from the *Select Insights Credential* window.
. Select the update options for this project from the *Options* field and provide any additional values, if applicable. For more information about each option click the tooltip image:question_circle.png[Tooltip,15,15] icon next to each one.
diff --git a/downstream/modules/platform/proc-controller-create-instance-group.adoc b/downstream/modules/platform/proc-controller-create-instance-group.adoc
index 2238052418..2098f30b3a 100644
--- a/downstream/modules/platform/proc-controller-create-instance-group.adoc
+++ b/downstream/modules/platform/proc-controller-create-instance-group.adoc
@@ -26,10 +26,15 @@ If you do not specify values, then the *Policy instance minimum* and *Policy ins
[NOTE]
====
The default value of 0 for *Max concurrent jobs* and *Max forks* denotes no limit.
-For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-instance-and-container-groups#controller-instance-group-capacity[Instance group capacity limits] in the _{ControllerAG}_.
+ifdef::controller-UG[]
+For more information, see xref:controller-instance-group-capacity[Instance group capacity limits].
+endif::controller-UG[]
+ifdef::operator-mesh[]
+For more information, see link:{URLControllerUserGuide}/index#controller-instance-group-capacity[Instance group capacity limits].
+endif::operator-mesh[]
====
-. Click btn:[Create Instance Group], or, if you have edited an existing Instance Group click btn:[Save Instance Group]
+. Click btn:[Create instance group], or, if you have edited an existing instance group, click btn:[Save instance group].
When you have successfully created the instance group the *Details* tab of the newly created instance group enables you to review and edit your instance group information.
diff --git a/downstream/modules/platform/proc-controller-create-inventory.adoc b/downstream/modules/platform/proc-controller-create-inventory.adoc
index 2db8b447ce..98eb4ce497 100644
--- a/downstream/modules/platform/proc-controller-create-inventory.adoc
+++ b/downstream/modules/platform/proc-controller-create-inventory.adoc
@@ -1,21 +1,76 @@
[id="controller-creating-inventory"]
-= Creating a new Inventory
+= Browsing and creating inventories
The Inventories window displays a list of the inventories that are currently available.
You can sort the inventory list by name and searched type, organization, description, owners and modifiers of the inventory, or additional criteria.
.Procedure
-. To view existing inventories, select {MenuInfrastructureInventories} from the navigation panel.
-** {ControllerNameStart} provides a demonstration inventory for you to use as you learn how the controller works.
-You can use it as it is or edit it later.
-You can create another inventory, if necessary.
-. To add another inventory, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#proc-controller-adding-new-inventory[Add a new inventory] in the _{ControllerUG}_ for more information.
-. Click btn:[Demo Inventory] to view its details.
+. From the navigation panel, select {MenuInfrastructureInventories}.
+. Click btn:[Create inventory], and select the type of inventory to create.
+. Enter the appropriate details into the following fields:
-image::controller-inventories-demo-details.png[Demo inventory]
+* *Name*: Enter a name appropriate for this inventory.
+* Optional: *Description*: Enter an arbitrary description as appropriate.
+* *Organization*: Required. Choose among the available organizations.
+* Only applicable to Smart Inventories: *Smart Host Filter*: Populate the hosts for this inventory by using a search filter.
++
+_Example_
++
+name__icontains=RedHat
++
+These options are based on the organization you chose.
++
+Filters are similar to tags in that tags are used to filter certain hosts that contain those names.
+Therefore, to populate the *Smart Host Filter* field, specify a tag that contains the hosts you want, not the hosts themselves.
++
+Filters are case-sensitive.
+* *Instance Groups*: Select the instance group or groups for this inventory to run on.
++
+You can select many instance groups and sort them in the order that you want them to run.
++
+//image:select-instance-groups-modal.png[image]
-As with organizations, inventories also have associated users and teams that you can view through the *Access* tab.
-For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-inventories[Inventories] in the _{ControllerUG}_.
+* Optional: *Labels*: Supply labels that describe this inventory, so they can be used to group and filter inventories and jobs.
+* Only applicable to constructed inventories: *Input inventories*: Specify the source inventories to include in this constructed inventory.
+//Click the image:search.png[Search,15,15] icon to select from available inventories.
+Empty groups from input inventories are copied into the constructed inventory.
+* Optional: (Only applicable to constructed inventories) *Cached timeout (seconds)*: Set the length of time you want the cache plugin data to time out.
+* Only applicable to constructed inventories: *Verbosity*: Control the level of output that Ansible produces as the playbook executes related to inventory sources associated with constructed inventories.
++
+Select the verbosity from:
-A user with the role of *System Administrator* has been automatically populated for this.
+* *Normal*
+* *Verbose*
+* *More verbose*
+* *Debug*
+* *Connection Debug*
+* *WinRM Debug*
+
+** *Verbose* logging includes the output of all commands.
+** *More verbose* provides more detail than *Verbose*.
+** *Debug* logging is exceedingly verbose and includes information about SSH operations that can be useful in certain support instances. Most users do not need to see debug mode output.
+** *Connection Debug* enables you to run SSH in verbose mode, providing debugging information about the progress of the SSH connection.
+** *WinRM Debug* provides verbosity specific to Windows Remote Management (WinRM) connections.
++
+Click the image:arrow.png[Expand,15,15] icon for information on *How to use the constructed inventory plugin*.
+* Only applicable to constructed inventories: *Limit*: Restricts the number of returned hosts for the inventory source associated with the constructed inventory.
+You can paste a group name into the limit field to only include hosts in that group.
+For more information, see the *Source vars* setting.
+* Only applicable to standard inventories: *Options*: Check the *Prevent Instance Group Fallback* option to enable only the instance groups listed in the *Instance Groups* field to execute the job.
+If unchecked, all available instances in the execution pool are used based on the hierarchy.
+
+* *Variables* (*Source vars* for constructed inventories):
+
+** *Variables*: Variable definitions and values to apply to all hosts in this inventory.
+Enter variables by using either JSON or YAML syntax.
+Use the radio button to toggle between the two.
+** *Source vars* for constructed inventories creates groups, specifically under the `groups` key of the data.
+It accepts Jinja2 template syntax, renders it for every host, makes a `true` or `false` evaluation, and includes the host in the group (from the key of the entry) if the result is `true`.
+This is particularly useful because you can paste that group name into the limit field to only include hosts in that group.
+//See Example 1 in xref:ref-controller-smart-host-filter[Smart host filters].
+. Click btn:[Create inventory].
+
+After saving the new inventory, you can configure permissions, groups, hosts, and sources, and view completed jobs, if applicable to the type of inventory.
diff --git a/downstream/modules/platform/proc-controller-create-job-template.adoc b/downstream/modules/platform/proc-controller-create-job-template.adoc
index c960e56225..0e97c3b717 100644
--- a/downstream/modules/platform/proc-controller-create-job-template.adoc
+++ b/downstream/modules/platform/proc-controller-create-job-template.adoc
@@ -5,13 +5,15 @@
.Procedure
. From the navigation panel, select {MenuAETemplates}.
-. On the *Templates* list view, select *Create job template* from the *Create template* list.
+. On the *Templates* page, select *Create job template* from the *Create template* list.
. Enter the appropriate details in the following fields:
+
[NOTE]
====
If a field has the *Prompt on launch* checkbox selected, launching the job prompts you for the value for that field when launching.
+
Most prompted values override any values set in the job template.
+
Exceptions are noted in the following table.
====
+
@@ -20,7 +22,7 @@ Exceptions are noted in the following table.
| *Field* | *Options* | *Prompt on Launch*
| Name | Enter a name for the job.| N/A
| Description| Enter an arbitrary description as appropriate (optional). | N/A
-| Job Type a| Choose a job type:
+| Job type a| Choose a job type:
- Run: Start the playbook when launched, running Ansible tasks on the selected hosts.
@@ -34,7 +36,7 @@ A System Administrator must grant you or your team permissions to be able to use
Inventory prompts show up as its own step in a later prompt window. | Project | Select the project to use with this job template from the projects available to the user that is logged in. | N/A
-| SCM branch | This field is only present if you chose a project that allows branch override.
+| Source control branch | This field is only present if you chose a project that allows branch override.
Specify the overriding branch to use in your job run.
If left blank, the specified SCM branch (or commit hash or tag) from the project is used.
@@ -79,15 +81,6 @@ When a label is removed, it is no longer associated with that particular Job or
- Jobs inherit labels from the Job Template at the time of launch.
If you delete a label from a Job Template, it is also deleted from the Job. a|
- If selected, even if a default value is supplied, you are prompted when launching to supply additional labels, if needed.
- You cannot delete existing labels, selecting image:disassociate.png[Disassociate,10,10] only removes the newly added labels, not existing default labels.
-| Variables a| - Pass extra command line variables to the playbook.
-This is the "-e" or "-extra-vars" command line parameter for ansible-playbook that is documented in the Ansible documentation at link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#defining-variables-at-runtime[Defining variables at runtime].
-- Provide key or value pairs by using either YAML or JSON.
-These variables have a maximum value of precedence and overrides other variables specified elsewhere.
-The following is an example value:
-`git_branch: production
-release_version: 1.5` | Yes.
-
-If you want to be able to specify `extra_vars` on a schedule, you must select *Prompt on launch* for Variables on the job template, or enable a survey on the job template. Those answered survey questions become `extra_vars`.
| Forks | The number of parallel or simultaneous processes to use while executing the playbook. A value of zero uses the Ansible default setting, which is five parallel processes unless overridden in `/etc/ansible/ansible.cfg`. | Yes
| Limit a| A host pattern to further constrain the list of hosts managed or affected by the playbook. You can separate many patterns by colons (:).
@@ -108,7 +101,7 @@ Verbose logging includes the output of all commands.
Debug logging is exceedingly verbose and includes information about SSH operations that can be useful in certain support instances.
Verbosity `5` causes {ControllerName} to block heavily when jobs are running, which could delay reporting that the job has finished (even though it has) and can cause the browser tab to lock up.| Yes
-| Job Slicing | Specify the number of slices you want this job template to run.
+| Job slicing | Specify the number of slices you want this job template to run.
Each slice runs the same tasks against a part of the inventory.
For more information about job slices, see xref:controller-job-slicing[Job Slicing]. | Yes
| Timeout a| This enables you to specify the length of time (in seconds) that the job can run before it is canceled. Consider the following for setting the timeout value:
@@ -117,10 +110,10 @@ For more information about job slices, see xref:controller-job-slici
- A negative timeout (<0) on a job template is a true "no timeout" on the job.
- A timeout of 0 on a job template defaults the job to the global timeout (which is no timeout by default).
- A positive timeout sets the timeout for that job template. | Yes
-| Show Changes | Enables you to see the changes made by Ansible tasks. | Yes
-| Instance Groups | Choose link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-instance-and-container-groups[Instance and Container Groups] to associate with this job template.
+| Show changes | Enables you to see the changes made by Ansible tasks. | Yes
+| Instance groups | Choose xref:controller-instance-and-container-groups[Instance and Container Groups] to associate with this job template.
If the list is extensive, use the image:examine.png[examine,15,15] icon to narrow the options.
-Job template instance groups contribute to the job scheduling criteria, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-instance-and-container-groups#controller-job-runtime-behavior[Job Runtime Behavior] and link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-instance-and-container-groups#controller-control-job-run[Control where a job runs] for rules.
+Job template instance groups contribute to the job scheduling criteria; for rules, see link:{URLControllerAdminGuide}/controller-clustering#controller-cluster-job-runtime[Job Runtime Behavior] and xref:controller-control-job-run[Control where a job runs].
A System Administrator must grant you or your team permissions to be able to use an instance group in a job template.
Use of a container group requires admin rights. a|
- Yes.
@@ -129,13 +122,22 @@ If selected, you are providing the jobs preferred instance groups in order of pr
- If you prompt for an instance group, what you enter replaces the normal instance group hierarchy and overrides all of the organizations' and inventories' instance groups.
- The Instance Groups prompt shows up as its own step in a later prompt window.
-| Job Tags | Type and select the *Create* menu to specify which parts of the playbook should be executed.
+| Job tags | Type and select the *Create* menu to specify which parts of the playbook should be executed.
For more information and examples see link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_tags.html[Tags] in the Ansible documentation. | Yes
-| Skip Tags | Type and select the *Create* menu to specify certain tasks or parts of the playbook to skip.
+| Skip tags | Type and select the *Create* menu to specify certain tasks or parts of the playbook to skip.
For more information and examples see link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_tags.html[Tags] in the Ansible documentation. | Yes
+| Extra variables a| - Pass extra command line variables to the playbook.
+This is the "-e" or "--extra-vars" command line parameter for ansible-playbook that is documented in the Ansible documentation at link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#defining-variables-at-runtime[Defining variables at runtime].
+- Provide key-value pairs by using either YAML or JSON.
+These variables have the highest precedence and override other variables specified elsewhere.
+The following is an example value:
+`git_branch: production
+release_version: 1.5` | Yes.
+
+If you want to be able to specify `extra_vars` on a schedule, you must select *Prompt on launch* for Variables on the job template, or enable a survey on the job template. Those answered survey questions become `extra_vars`.
|===
+
-. Specify the following options for launching this template, if necessary:
+. You can set the following options for launching this template, if necessary:
* *Privilege escalation*: If checked, you enable this playbook to run as an administrator.
This is the equivalent of passing the `--become` option to the `ansible-playbook` command.
* *Provisioning callback*: If checked, you enable a host to call back to {ControllerName} through the REST API and start a job from this job template.
@@ -151,13 +153,15 @@ GitHub and GitLab are the supported SCM systems.
** *Webhook key*: Generated shared secret to be used by the webhook service to sign payloads sent to {ControllerName}.
You must configure this in the settings on the webhook service in order for {ControllerName} to accept webhooks from this service.
** *Webhook credential*: Optionally, give a GitHub or GitLab personal access token (PAT) as a credential to use to send status updates back to the webhook service.
++
Check this box if you want to run job slices simultaneously. For more information, see xref:controller-capacity-determination[{ControllerNameStart} capacity determination and job impact]. * *Enable fact storage*: If checked, {ControllerName} stores gathered facts for all hosts in an inventory related to the job running. * *Prevent instance group fallback*: Check this option to allow only the instance groups listed in the *Instance Groups* field to run the job. -If clear, all available instances in the execution pool are used based on the hierarchy described in link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index[Control where a job runs]. +If clear, all available instances in the execution pool are used based on the hierarchy described in xref:controller-control-job-run[Control where a job runs]. . Click btn:[Create job template], when you have completed configuring the details of the job template. Creating the template does not exit the job template page but advances to the Job Template *Details* tab. @@ -170,4 +174,4 @@ You must first save the template before launching, otherwise, btn:[Launch templa .Verification . From the navigation panel, select {MenuAETemplates}. -. Verify that the newly created template appears on the *Templates* list view. +. Verify that the newly created template appears on the *Templates* page. diff --git a/downstream/modules/platform/proc-controller-create-notification-template.adoc b/downstream/modules/platform/proc-controller-create-notification-template.adoc index 13a84b60a0..d72a9879b8 100644 --- a/downstream/modules/platform/proc-controller-create-notification-template.adoc +++ b/downstream/modules/platform/proc-controller-create-notification-template.adoc @@ -15,4 +15,4 @@ Use the following procedure to create a notification template. * *Organization*: Specify the organization that the notification belongs to. * *Type*: Choose a type of notification from the drop-down menu. For more information, see the xref:controller-notification-types[Notification types] section. -. Click btn:[Save]. +. Click btn:[Save notifier]. diff --git a/downstream/modules/platform/proc-controller-create-organization.adoc b/downstream/modules/platform/proc-controller-create-organization.adoc index 10a2575f19..21eec87066 100644 --- a/downstream/modules/platform/proc-controller-create-organization.adoc +++ b/downstream/modules/platform/proc-controller-create-organization.adoc @@ -1,51 +1,40 @@ +:_mod-docs-content-type: PROCEDURE + [id="proc-controller-create-organization"] = Creating an organization -[NOTE] -==== -{ControllerNameStart} automatically creates a default organization. -If you have a Self-support level license, you have only the default organization available and must not delete it. +{PlatformNameShort} automatically creates a default organization. If you have a self-support level license, you have only the default organization available and cannot delete it. -You can use the default organization as it is initially set up and edit it later. -==== +//[ddacosta] Editing has been disabled but there are ongoing conversations about adding it back later: +// You can use the default organization as it is initially set up and edit it later. -. Click btn:[Add] to create a new organization. +.Procedure +. From the navigation panel, select {MenuAMOrganizations}. +. Click btn:[Create organization]. +. Enter the *Name* and optionally provide a *Description* for your organization. 
+
-image:organizations-new-organization-form.png[Organizations- new organization form]
-
-. You can configure several attributes of an organization:
-
-* Enter the *Name* for your organization (required).
-* Enter a *Description* for the organization.
-* *Max Hosts* is only editable by a superuser to set an upper limit on the number of license hosts that an organization can have.
-Setting this value to *0* signifies no limit.
-If you try to add a host to an organization that has reached or exceeded its cap on hosts, an error message displays:
+[NOTE]
+====
+If {ControllerName} is enabled on the platform, continue with Step 4. Otherwise, proceed to Step 6.
+====
+
-The inventory sync output view also shows the host limit error.
+. Select the name of the *Execution environment* that members of this organization use to run automation, or search for an existing one.
+. Enter the name of the *Instance Groups* on which to run this organization.
+. Optional: Enter the *Galaxy credentials* or search from a list of existing ones.
+. Select the *Max hosts* for this organization. The default is 0. When this value is 0, it signifies no limit. If you try to add a host to an organization that has reached or exceeded its cap on hosts, an error message displays:
+
-image:organizations-max-hosts-error-output-view.png[Error]
+----
+You have already reached the maximum number of 1 hosts allowed for your organization. Contact your System Administrator for assistance.
+----
+
-Click btn:[Details] for additional information about the error.
+. Click btn:[Next].
+. If you selected more than one instance group, you can manage the order by dragging and dropping the instance group up or down in the list and clicking btn:[Confirm].
+
-* Enter the name of the *Instance Groups* on which to run this organization.
-* Enter the name of the {ExecEnvShort} or search for one that exists on which to run this organization.
-For more information, see link:https://docs.ansible.com/automation-controller/4.4/html/upgrade-migration-guide/upgrade_to_ees.html#upgrade-venv[Upgrading to Execution Environments].
-* Optional: Enter the *Galaxy Credentials* or search from a list of existing ones.
-. Click btn:[Save] to finish creating the organization.
-
-When the organization is created, {ControllerName} displays the Organization details, and enables you to manage access and {ExecEnvShort}s for the organization.
-
-image:organizations-show-record-for-example-organization.png[Organization details]
-
-From the *Details* tab, you can edit or delete the organization.
-
[NOTE]
====
-If you attempt to delete items that are used by other work items, a message lists the items that are affected by the deletion and prompts you to confirm the deletion.
-Some screens contain items that are invalid or have been deleted previously, and will fail to run.
+The execution precedence is determined by the order in which the instance groups are listed.
====
-
-The following is an example of such a message:
-
-image:warning-deletion-dependencies.png[Warning]
\ No newline at end of file
++
+. Click btn:[Next] and verify the organization settings.
+. Click btn:[Finish].
diff --git a/downstream/modules/platform/proc-controller-create-workflow-template.adoc b/downstream/modules/platform/proc-controller-create-workflow-template.adoc
index e950e236e8..fbd261012a 100644
--- a/downstream/modules/platform/proc-controller-create-workflow-template.adoc
+++ b/downstream/modules/platform/proc-controller-create-workflow-template.adoc
@@ -1,6 +1,6 @@
[id="controller-create-workflow-template"]
-= Creating a workflow template
+= Creating a workflow job template
To create a new workflow job template, complete the following steps:
@@ -61,6 +61,10 @@ a| Yes
If selected, even if a default value is supplied, you are prompted when launching to supply additional labels, if needed.
- You cannot delete existing labels, selecting image:disassociate.png[Disassociate,10,10] only removes the newly added labels, not existing default labels.
+| Job tags | Type and select the *Create* drop-down to specify which parts of the playbook should run.
+For more information and examples see link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_tags.html[Tags] in the Ansible documentation. | Yes
+| Skip tags | Type and select the *Create* drop-down to specify certain tasks or parts of the playbook to skip.
+For more information and examples see link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_tags.html[Tags] in the Ansible documentation. | Yes
| Extra variables a| - Pass extra command line variables to the playbook.
This is the "-e" or "--extra-vars" command line parameter for ansible-playbook that is documented in the Ansible documentation at link:https://docs.ansible.com/ansible/latest/reference_appendices/general_precedence.html[Controlling how Ansible behaves: precedence rules].
@@ -70,27 +74,24 @@ release_version: 1.5` | Yes
If you want to be able to specify `extra_vars` on a schedule, you must select *Prompt on launch* for *Extra variables* on the workflow job template, or enable a survey on the job template. Those answered survey questions become `extra_vars`.
For more information about extra variables, see xref:controller-extra-variables[Extra Variables].
-| Job tags | Type and select the *Create* drop-down to specify which parts of the playbook should run.
-For more information and examples see link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_tags.html[Tags] in the Ansible documentation. | Yes
-| Skip Tags | Type and select the *Create* drop-down to specify certain tasks or parts of the playbook to skip.
-For more information and examples see link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_tags.html[Tags] in the Ansible documentation. | Yes
|===
+
. Specify the following *Options* for launching this template, if necessary:
+
* Check *Enable webhook* to turn on the ability to interface with a predefined SCM system web service that is used to launch a workflow job template.
GitHub and GitLab are the supported SCM systems.
** If you enable webhooks, other fields display, prompting for additional information:
*** *Webhook service*: Select which service to listen for webhooks from.
-*** *Webhook credential*: Optionally, provide a GitHub or GitLab personal access token (PAT) as a credential to use to send status updates back to the webhook service.
-For more information, see xref:ref-controller-credential-types[Credential Types] to create one.
-+
-** When you click btn:[Create workflow job template], additional fields populate and the workflow visualizer automatically opens.
*** *Webhook URL*: Automatically populated with the URL for the webhook service to POST requests to. +//*** *Webhook credential*: Optionally, provide a GitHub or GitLab personal access token (PAT) as a credential to use to send status updates back to the webhook service. +//For more information, see TBD[Credential Types] to create one. *** *Webhook key*: Generated shared secret to be used by the webhook service to sign payloads sent to {ControllerName}. You must configure this in the settings on the webhook service so that webhooks from this service are accepted in {ControllerName}. For additional information about setting up webhooks, see xref:controller-work-with-webhooks[Working with Webhooks]. + -Check *Enable concurrent jobs* to allow simultaneous runs of this workflow. +//** When you click btn:[Create workflow job template], the workflow visualizer automatically opens. +//*** *Webhook URL*: Automatically populated with the URL for the webhook service to POST requests to. +* Check *Enable concurrent jobs* to allow simultaneous runs of this workflow. For more information, see xref:controller-capacity-determination[{ControllerNameStart} capacity determination and job impact]. + . When you have completed configuring the workflow template, click btn:[Create workflow job template]. @@ -110,7 +111,7 @@ There you can complete the following tasks: + [NOTE] ==== -Save the template before launching, or btn:[Launch template] remains disabled. +Save the template before launching, or btn:[Launch template] remains disabled. The *Notifications* tab is only present after you save the template. ==== diff --git a/downstream/modules/platform/proc-controller-creating-a-team.adoc b/downstream/modules/platform/proc-controller-creating-a-team.adoc index 5cabe5ad96..a94cd9e4ef 100644 --- a/downstream/modules/platform/proc-controller-creating-a-team.adoc +++ b/downstream/modules/platform/proc-controller-creating-a-team.adoc @@ -1,50 +1,27 @@ +:_mod-docs-content-type: PROCEDURE + [id="proc-controller-creating-a-team"] = Creating a team -You can create as many teams of users as you need for your organization. -You can assign permissions to each team, just as with users. -Teams can also assign ownership for credentials, minimizing the steps to assign the same credentials to the same user. - -.Procedure -. On the *Teams* page, click btn:[Add]. -+ -//image:teams-create-new-team.png[Teams -create new team] -. Enter the appropriate details into the following fields: - -* *Name* -* Optional: *Description* -* *Organization*: You must select an existing organization -. Click *Save*. -The *Details* dialog opens. -. Review and edit your team information. -+ -image:teams-example-team-successfully-created.png[Teams- Details dialog] - -== Adding or removing a user to a team - -To add a user to a team, the user must already have been created. -For more information, see xref:proc-controller-creating-a-user[Creating a user]. -Adding a user to a team adds them as a member only. -Use the *Access* tab to specify a role for the user on different resources. +You can create new teams, assign an organization to the team, and manage the users and administrators associated with each team. +Users associated with a team inherit the permissions associated with the team and any organization permissions to which the team has membership. -.Procedure -. In the *Access* tab of the *Details* page click btn:[Add]. -. Follow the prompts to add a user and assign them to roles. -. Click btn:[Save]. 
-
-== Removing roles for a user
+To add a user or administrator to a team, the user must have already been created.
.Procedure
-* To remove roles for a particular user, click the image:disassociate.png[Disassociate,10,10] icon next to its resource.
-
-//image:permissions-disassociate.png[image]
-
-This launches a confirmation dialog, asking you to confirm the disassociation.
-
-//image:permissions-disassociate-confirm.png[image]
+. From the navigation panel, select {MenuAMTeams}.
+. Click btn:[Create team].
+. Enter a *Name* and optionally give a *Description* for the team.
+. Select an *Organization* to be associated with this team.
++
+[NOTE]
+====
+Each team can only be assigned to one organization.
+====
++
+. Click btn:[Create team].
++
+The *Details* page opens, where you can review and edit your team information.
-include::ref-controller-team-access.adoc[leveloffset=+1]
-include::ref-controller-team-roles.adoc[leveloffset=+1]
-include::proc-controller-team-add-permissions.adoc[leveloffset=+1]
diff --git a/downstream/modules/platform/proc-controller-creating-a-user.adoc b/downstream/modules/platform/proc-controller-creating-a-user.adoc
index 4ab27f7a18..3871b2ccd4 100644
--- a/downstream/modules/platform/proc-controller-creating-a-user.adoc
+++ b/downstream/modules/platform/proc-controller-creating-a-user.adoc
@@ -1,57 +1,34 @@
+:_mod-docs-content-type: PROCEDURE
+
[id="proc-controller-creating-a-user"]
= Creating a user
-To create new users in {ControllerName} and assign them a role.
+There are three types of users in {PlatformNameShort}:
+
+Normal user:: Normal users have read and write access limited to the resources (such as inventory, projects, and job templates) for which that user has been granted the appropriate roles and privileges. Normal users are the default type of user when no other *User type* is specified.
+{PlatformNameShort} Administrator:: An administrator (also known as a Superuser) has full system administration privileges, with full read and write access over the entire installation. An administrator is typically responsible for managing all aspects of and delegating responsibilities for day-to-day work to various users.
+{PlatformNameShort} Auditor:: Auditors have read-only capability for all objects within the environment.
.Procedure
-. On the *Users* page, click btn:[Add].
-+
-The *Create User* dialog opens.
-. Enter the appropriate details about your new user.
-Fields marked with an asterisk (*) are required.
+. From the navigation panel, select {MenuAMUsers}.
+. Click btn:[Create user].
+. Enter the details about your new user in the fields on the *Create user* page. Fields marked with an asterisk (*) are required.
+. Normal users are the default when no *User type* is specified. To define a user as an administrator or auditor, select a *User type* checkbox.
+
[NOTE]
====
If you are modifying your own password, log out and log back in again for it to take effect.
====
+
-You can assign three types of users:
-
-* *Normal User*: Normal Users have read and write access limited to the resources (such as inventory, projects, and job templates) for which that user has been granted the appropriate roles and privileges.
-* *System Auditor*: Auditors inherit the read-only capability for all objects within the environment.
-* *System Administrator*: A System Administrator (also known as a Superuser) has full system administration privileges -- with full read and write privileges over the entire installation.
-A System Administrator is typically responsible for managing all aspects of and delegating responsibilities for day-to-day work to various users.
-+
-image:users-create-user-form-types.png[User Types]
-+
-[NOTE]
-====
-A default administrator with the role of *System Administrator* is automatically created during the installation process and is available to all users of {ControllerName}.
-One *System Administrator* must always exist.
-To delete the *System Administrator* account, you must first create another *System Administrator* account.
-====
-
-. Click btn:[Save].
-+
-When the user is successfully created, the *User* dialog opens.
-+
-image:users-edit-user-form.png[Edit User Form]
+. Select the *Organization* to be assigned to this user. For information about creating a new organization, refer to xref:proc-controller-create-organization[Creating an organization].
+. Click btn:[Create user].
-. Click btn:[Delete] to delete the user, or you can delete users from a list of current users.
-For more information, see xref:proc-controller-deleting-a-user[Deleting a user].
-+
-The same window opens whether you click the user's name, or the Edit image:leftpencil.png[Edit, 15,15] icon beside the user. You can use this window to review and modify the User's *Organizations*, *Teams*, *Roles*, and other user membership details.
+When the user is successfully created, the *User* dialog opens. From here, you can review and modify the user’s Teams, Roles, Tokens, and other membership details.
[NOTE]
====
If the user is not newly-created, the details screen displays the last login activity of that user.
-
-//image:users-last-login-info.png[image]
====
-If you log in as yourself, and view the details of your user profile, you can manage tokens from your user profile.
-
-For more information, see xref:proc-controller-user-tokens[Adding a user token].
-
-//image:user-with-token-button.png[image]
+If you log in as yourself and view the details of your user profile, you can manage tokens by selecting the *Tokens* tab. For more information, see xref:proc-controller-apps-create-tokens[Adding a token].
diff --git a/downstream/modules/platform/proc-controller-credential-create-openshift-account.adoc b/downstream/modules/platform/proc-controller-credential-create-openshift-account.adoc
index 52a6d006dc..3318d83713 100644
--- a/downstream/modules/platform/proc-controller-credential-create-openshift-account.adoc
+++ b/downstream/modules/platform/proc-controller-credential-create-openshift-account.adoc
@@ -8,8 +8,49 @@ After you create the service account, its credentials are provided to {Controlle
After you create a service account, use the information in the new service account to configure {ControllerName}.
.Procedure
-. To create a service account, download and use the link:https://docs.ansible.com/automation-controller/latest/html/userguide/_downloads/7a0708e6c2113e9601bf252270fa6c50/containergroup-sa.yml[sample service account] and change it as required to obtain the previous credentials.
-. Apply the configuration from the link:https://docs.ansible.com/automation-controller/latest/html/userguide/_downloads/7a0708e6c2113e9601bf252270fa6c50/containergroup-sa.yml[sample service account]:
+. To create a service account, download and use the sample service account, `containergroup-sa.yml`, and change it as needed to obtain the credentials:
++
+[literal, options="nowrap" subs="+attributes"]
+----
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: containergroup-service-account
+  namespace: containergroup-namespace
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: role-containergroup-service-account
+  namespace: containergroup-namespace
+rules:
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+- apiGroups: [""]
+  resources: ["pods/log"]
+  verbs: ["get"]
+- apiGroups: [""]
+  resources: ["pods/attach"]
+  verbs: ["get", "list", "watch", "create"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: role-containergroup-service-account-binding
+  namespace: containergroup-namespace
+subjects:
+- kind: ServiceAccount
+  name: containergroup-service-account
+  namespace: containergroup-namespace
+roleRef:
+  kind: Role
+  name: role-containergroup-service-account
+  apiGroup: rbac.authorization.k8s.io
+----
++
+. Apply the configuration from `containergroup-sa.yml`:
+
[literal, options="nowrap" subs="+attributes"]
----
diff --git a/downstream/modules/platform/proc-controller-customize-pod-spec.adoc b/downstream/modules/platform/proc-controller-customize-pod-spec.adoc
index be2c7ac232..7076b7ab4e 100644
--- a/downstream/modules/platform/proc-controller-customize-pod-spec.adoc
+++ b/downstream/modules/platform/proc-controller-customize-pod-spec.adoc
@@ -10,12 +10,12 @@ A full list of options can be found in the link:https://docs.openshift.com/onlin
.Procedure
. From the navigation panel, select {MenuInfrastructureInstanceGroups}.
. Click btn:[Create group] and select *Create container group*.
-. Check the option for *Customize pod spec*
-. Enter a custom Kubernetes or OpenShift Pod specification in the *Pod Spec Override* field.
+. Check the option for *Customize pod spec*.
+. Enter a custom Kubernetes or OpenShift Pod specification in the *Pod spec override* field.
+
image::ag-instance-group-customize-cg-pod.png[Customize pod specification]
+
-. Click btn:[Create Container Group].
+. Click btn:[Create container group].
//You can give additional customizations, if needed. Click btn:[Expand] to view the entire customization window:
diff --git a/downstream/modules/platform/proc-controller-define-filter-with-facts.adoc b/downstream/modules/platform/proc-controller-define-filter-with-facts.adoc
index bfe7fb9671..5ee2c74ca4 100644
--- a/downstream/modules/platform/proc-controller-define-filter-with-facts.adoc
+++ b/downstream/modules/platform/proc-controller-define-filter-with-facts.adoc
@@ -6,8 +6,8 @@ Use the following procedure to use `ansible_facts` to define the host filter whe
.Procedure
. From the navigation panel, select {MenuInfrastructureInventories}.
-. Select *Add Smart Inventory* from *Add* list.
-. In the *Create new smart inventory* page, click the image:search.png[Search,15,15] icon in the *Smart host filter* field.
+. Select *Create Smart Inventory* from the *Create inventory* list.
+//. In the *Create Smart Inventory* page, click the image:search.png[Search,15,15] icon in the *Smart host filter* field.
This opens a window to filter hosts for this inventory.
+
image:define_host_filter.png[Define host filter]
diff --git a/downstream/modules/platform/proc-controller-define-schedule-rules.adoc b/downstream/modules/platform/proc-controller-define-schedule-rules.adoc
index 8e47292635..61d239e8b0 100644
--- a/downstream/modules/platform/proc-controller-define-schedule-rules.adoc
+++ b/downstream/modules/platform/proc-controller-define-schedule-rules.adoc
@@ -5,7 +5,7 @@
Enter the following information:
* *Frequency*: Enter how frequently the schedule runs.
-//* *Interval*: I don't know what this indicates.
+* *Interval*: The number of units of the selected frequency to skip between each run. For example, with *Frequency* set to *Hourly*, an interval of 2 runs the schedule every two hours.
* *Week Start*: Select the day of the week that you want the week to begin.
* *Weekdays*: Select the days of the week on which to run the schedule.
* *Months*: Select the months of the year on which to run the schedule
@@ -20,7 +20,7 @@ For more information, see the link:https://datatracker.ietf.org/doc/html/rfc5545
* *Count*: The number of times this rule should be used.
* *Until*: Use this rule until the specified date and time
-Click btn:[Save rule]
+Click btn:[Save rule].
The *Schedule Rules* summary page is displayed.
Click btn:[Add rule] to add additional rules.
diff --git a/downstream/modules/platform/proc-controller-deleting-a-user.adoc b/downstream/modules/platform/proc-controller-deleting-a-user.adoc
index 8dc4831d99..2dd255887e 100644
--- a/downstream/modules/platform/proc-controller-deleting-a-user.adoc
+++ b/downstream/modules/platform/proc-controller-deleting-a-user.adoc
@@ -1,16 +1,18 @@
+:_mod-docs-content-type: PROCEDURE
+
[id="proc-controller-deleting-a-user"]
= Deleting a user
-Before you can delete a user, you must have user permissions.
-When you delete a user account, the name and email of the user are permanently removed from {ControllerName}.
+Before you can delete a user, you must have normal user or system administrator permissions. When you delete a user account, the name and email of the user are permanently removed from {PlatformNameShort}.
.Procedure
-. From the navigation panel, select {MenuControllerUsers}.
-. Click btn:[Users] to display a list of the current users.
-. Select the check box for the user that you want to remove.
-. Click btn:[Delete].
-//+
-//image:users-home-users-checked-delete.png[image]
+. From the navigation panel, select {MenuAMUsers}.
+. Select the checkbox for the user that you want to remove.
+. Click the {MoreActionsIcon} icon next to the user you want removed and select *Delete user*.
++
+[NOTE]
+====
+You can delete multiple users by selecting the checkbox next to each user you want to remove, and clicking *Delete users* from the *More actions {MoreActionsIcon}* list.
+====
-. Click btn:[Delete] in the confirmation warning message to permanently delete the user.
diff --git a/downstream/modules/platform/proc-controller-edit-credential.adoc b/downstream/modules/platform/proc-controller-edit-credential.adoc
index 78e6695908..9309dd2d4d 100644
--- a/downstream/modules/platform/proc-controller-edit-credential.adoc
+++ b/downstream/modules/platform/proc-controller-edit-credential.adoc
@@ -8,5 +8,5 @@ As part of the initial setup, you can leave the default *Demo Credential* as it
. Edit the credential by using one of these methods:
** Go to the credential Details page and click btn:[Edit].
-** From the navigation panel, select {MenuAMCredentials}. Click btn:[Edit] next to the credential name and edit the appropriate details.
+** From the navigation panel, select {MenuAECredentials}. Click btn:[Edit] next to the credential name and edit the appropriate details.
. Save your changes.
diff --git a/downstream/modules/platform/proc-controller-edit-nodes.adoc b/downstream/modules/platform/proc-controller-edit-nodes.adoc
index 3e7adf661e..24abf14c7f 100644
--- a/downstream/modules/platform/proc-controller-edit-nodes.adoc
+++ b/downstream/modules/platform/proc-controller-edit-nodes.adoc
@@ -5,27 +5,25 @@
.Procedure
* Edit a node by using one of these methods:
-** If you want to edit a node, click on the node you want to edit.
-The pane displays the current selections.
-Make your changes and click btn:[Select] to apply them to the graphical view.
-** To edit the edge type for an existing link, (*success*, *failure*, *always*), click the link.
-The pane displays the current selection.
-Make your changes and click btn:[Save] to apply them to the graphical view.
-** Click the link (image:link-icon.png[Link icon,15,15]) icon that appears on each node, to add a new link from one node to another.
-Doing this highlights the nodes that are possible to link to.
-These options are indicated by the dotted lines.
-Invalid options are indicated by disabled boxes (nodes) that would otherwise produce an invalid link.
-The following example shows the *Demo Project* as a possible option for the *e2e-ec20de52-project* to link to, indicated by the arrows:
-+
-image::ug-wf-node-link-scenario.png[Node link scenario]
-+
+** If you want to edit a node, click the icon of the node.
+The pane displays the current selections. Click btn:[Edit] to change them.
+Make your changes and click btn:[Finish] to apply them to the graphical view.
+** To edit the edge type for an existing link (*Run on success*, *Run on fail*, *Run always*), click (image:options_menu.png[Plus icon,15,15]) on the existing status.
+//** Click the link (image:link-icon.png[Link icon,15,15]) icon that appears on each node, to add a new link from one node to another.
+//Doing this highlights the nodes that are possible to link to.
+//These options are indicated by the dotted lines.
+//Invalid options are indicated by disabled boxes (nodes) that would otherwise produce an invalid link.
+//The following example shows the *Demo Project* as a possible option for the *e2e-ec20de52-project* to link to, indicated by the arrows:
+//+
+//image::ug-wf-node-link-scenario.png[Node link scenario]
+//+
-** To remove a link, click the link and click btn:[UNLINK].
+** To remove a link, click (image:options_menu.png[Plus icon,15,15]) for the link and click btn:[Remove link].
This option only appears in the pane if the target or child node has more than one parent.
All nodes must be linked to at least one other node at all times so you must create a new link before removing an old one.
* Edit the view of the workflow diagram by using one of these methods:
-** Click the settings icon to zoom, pan, or reposition the view.
+** Click the examine icon (image:examine.png[Examine icon,15,15]) to zoom in, the reduce icon (image:reduce.png[Reduce icon,15,15]) to zoom out, the expand icon (image:expand.png[Expand icon,15,15]) to fit to screen, or the reset icon (image:reset.png[Reset icon,15,15]) to reposition the view.
** Drag the workflow diagram to reposition it on the screen or use the scroll on your mouse to zoom.
diff --git a/downstream/modules/platform/proc-controller-enable-provision-callbacks.adoc b/downstream/modules/platform/proc-controller-enable-provision-callbacks.adoc index 28f2f08f9a..1b2529f992 100644 --- a/downstream/modules/platform/proc-controller-enable-provision-callbacks.adoc +++ b/downstream/modules/platform/proc-controller-enable-provision-callbacks.adoc @@ -29,8 +29,9 @@ To callback manually using REST: . Ensure that the request from the host is a POST. The following is an example using `curl` (all on a single line): + +[literal,options="nowrap",subs="+attributes"] ---- -curl -k -f -i -H 'Content-Type:application/json' -XPOST -d '{"host_config_key": "redhat"}' \ https:///api/v2/job_templates/7/callback/ ---- + diff --git a/downstream/modules/platform/proc-controller-getting-started-with-job-templates.adoc b/downstream/modules/platform/proc-controller-getting-started-with-job-templates.adoc index c317777688..fce2ade197 100644 --- a/downstream/modules/platform/proc-controller-getting-started-with-job-templates.adoc +++ b/downstream/modules/platform/proc-controller-getting-started-with-job-templates.adoc @@ -9,4 +9,4 @@ As part of the initial setup, a *Demo Job Template* is created for you. . To review existing templates, select {MenuAETemplates} from the navigation panel. . Click btn:[Demo Job Template] to view its details. -image::controller-job-template-demo-details.png[Job templates] +//image::controller-job-template-demo-details.png[Job templates] diff --git a/downstream/modules/platform/proc-controller-github-enterprise-org-settings.adoc b/downstream/modules/platform/proc-controller-github-enterprise-org-settings.adoc index 4c42b48c6f..8db63f9380 100644 --- a/downstream/modules/platform/proc-controller-github-enterprise-org-settings.adoc +++ b/downstream/modules/platform/proc-controller-github-enterprise-org-settings.adoc @@ -4,36 +4,34 @@ To set up social authentication for a GitHub Enterprise Organization, you must obtain a GitHub Enterprise Organization URL, an Organization API URL, an Organization OAuth2 key and secret for a web application. -To obtain the URLs, refer to the GitHub documentation on link:https://docs.github.com/en/enterprise-server@3.1/rest/reference/enterprise-admin[GitHub Enterprise administration]. +To obtain the URLs, refer to the link:https://docs.github.com/en/enterprise-server@3.1/rest/reference/enterprise-admin[GitHub Enterprise administration documentation]. -To obtain the key and secret, you must first register your enterprise organization-owned application at `https://github.com/organizations//settings/applications` +The OAuth2 key (Client ID) and secret (Client Secret) are used to supply the required fields in the UI. To register the application, you must supply it with your webpage URL, which is the *Callback URL* shown in the Authenticator details for your authenticator configuration. See xref:gw-display-auth-details[Displaying authenticator details] for instructions on accessing this information. -To register the application, you must supply it with your Authorization callback URL, which is the *Callback URL* shown in the *Details* page. - -Because it is hosted on site and not `github.com`, you must specify which authentication adapter it communicates with. - -Each key and secret must belong to a unique application and cannot be shared or reused between different authentication backends.
-The OAuth2 key (Client ID) and secret (Client Secret) are used to supply the required fields in the UI. +Each key and secret must belong to a unique application and cannot be shared or reused between different authentication backends. .Procedure -. From the navigation panel, select {MenuAEAdminSettings}. -. On the *Settings* page, select *GitHub settings* from the list of *Authentication* options. -. Click the *GitHub Enterprise Organization* tab. +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *GitHub enterprise organization* from the *Authentication type* list and click btn:[Next]. +. Enter a *Name* for this authentication configuration. +. When the application is registered, GitHub displays the *Client ID* and *Client Secret*: ++ +.. Copy and paste the GitHub Client ID into the *GitHub OAuth2 Key* field. +.. Copy and paste the GitHub Client Secret into the *GitHub OAuth2 Secret* field. ++ +. In the *Base URL* field, enter the hostname of the GitHub Enterprise instance, for example, `https://github.example.com`. +. In the *Github OAuth2 Enterprise API URL* field, enter the API URL of the GitHub Enterprise instance, for example, `https://github.example.com/api/v3`. +. Enter the name of your GitHub Enterprise organization, as used in your organization’s URL, for example, `https://github.com/<yourorg>/` in the *GitHub OAuth2 Enterprise Org Name* field. + -The *GitHub Enterprise Organization OAuth2 Callback URL* field is already pre-populated and non-editable. -When the application is registered, GitHub displays the Client ID and Client Secret. - -. Click btn:[Edit] to configure GitHub Enterprise Organization settings. -. In the *GitHub Enterprise Organization URL* field, enter the hostname of the GitHub Enterprise Organization instance, for example, https://github.orgexample.com. -. In the *GitHub Enterprise Organization API URL* field, enter the API URL of the GitHub Enterprise Organization instance, for example, https://github.orgexample.com/api/v3. -. Copy and paste GitHub's Client ID into the *GitHub Enterprise Organization OAuth2 Key* field. -. Copy and paste GitHub's Client Secret into the *GitHub Enterprise Organization OAuth2 Secret* field. -. Enter the name of your GitHub Enterprise organization, as used in your organization's URL, for example, https://github.com// in the *GitHub Enterprise Organization Name* field. -. For more information on completing the mapping fields, see xref:ref-controller-organization-mapping[Organization mapping] and xref:ref-controller-team-mapping[Team mapping]. -. Click btn:[Save]. - -.Verification -To verify that the authentication was configured correctly, logout of {ControllerName}. -The login screen displays the GitHub Enterprise Organization logo to enable logging in with those credentials. - -image:configure-controller-auth-github-ent-org-logo.png[image] +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] ++ +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +. Click btn:[Next].
+ +include::snippets/snip-gw-authentication-verification.adoc[] + +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] \ No newline at end of file diff --git a/downstream/modules/platform/proc-controller-github-enterprise-settings.adoc b/downstream/modules/platform/proc-controller-github-enterprise-settings.adoc index a78b59e8f8..f8b46b5d6d 100644 --- a/downstream/modules/platform/proc-controller-github-enterprise-settings.adoc +++ b/downstream/modules/platform/proc-controller-github-enterprise-settings.adoc @@ -1,37 +1,36 @@ [id="proc-controller-github-enterprise-settings"] -= GitHub Enterprise settings += Configuring GitHub enterprise authentication -To set up social authentication for a GitHub Enterprise, you must obtain a GitHub Enterprise URL, an API URL, OAuth2 key and secret for a web application. +To set up social authentication for a GitHub enterprise, you must obtain a GitHub Enterprise URL, an API URL, and an OAuth2 key and secret for a web application. -To obtain the URLs, refer to the link:https://docs.github.com/en/enterprise-server@3.1/rest/reference/enterprise-admin[GitHub Enterprise administration] documentation. +To obtain the URLs, refer to the link:https://docs.github.com/en/enterprise-server@3.1/rest/reference/enterprise-admin[GitHub Enterprise administration documentation]. -To obtain the key and secret, you must first register your enterprise-owned application at \https://github.com/organizations//settings/applications. +The OAuth2 key (Client ID) and secret (Client Secret) are used to supply the required fields in the UI. To register the application, you must supply it with your webpage URL, which is the *Callback URL* shown in the Authenticator details for your authenticator configuration. See xref:gw-display-auth-details[Displaying authenticator details] for instructions on accessing this information. -To register the application, you must supply it with your Authorization callback URL, which is the *Callback URL* shown in the *Details* page. -Because it is hosted on site and not `github.com`, you must specify which authentication adapter it communicates with. - -Each key and secret must belong to a unique application and cannot be shared or reused between different authentication backends. -The OAuth2 key (Client ID) and secret (Client Secret) are used to supply the required fields in the UI. +Each key and secret must belong to a unique application and cannot be shared or reused between different authentication backends. .Procedure -. From the navigation panel, select {MenuAEAdminSettings}. -. On the *Settings* page, select *GitHub settings* from the list of *Authentication* options. -. Click the *GitHub Enterprise* tab. +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *GitHub enterprise* from the *Authentication type* list and click btn:[Next]. +. Enter a *Name* for this authentication configuration. +. When the application is registered, GitHub displays the *Client ID* and *Client Secret*: ++ +.. Copy and paste the GitHub Client ID into the *GitHub OAuth2 Key* field. +.. Copy and paste the GitHub Client Secret into the *GitHub OAuth2 Secret* field. ++ +. In the *Base URL* field, enter the hostname of the GitHub Enterprise instance, for example, `https://github.example.com`. +. 
In the *Github OAuth2 Enterprise API URL* field, enter the API URL of the GitHub Enterprise instance, for example, `https://github.example.com/api/v3`. ++ +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] + -The *GitHub Enterprise OAuth2 Callback URL* field is already pre-populated and non-editable. -When the application is registered, GitHub displays the Client ID and Client Secret. - -. Click btn:[Edit] to configure GitHub Enterprise settings. -. In the *GitHub Enterprise URL* field, enter the hostname of the GitHub Enterprise instance, for example, https://github.example.com. -. In the *GitHub Enterprise API URL* field, enter the API URL of the GitHub Enterprise instance, for example, https://github.example.com/api/v3. -. Copy and paste GitHub's Client ID into the *GitHub Enterprise OAuth2 Key* field. -. Copy and paste GitHub's Client Secret into the *GitHub Enterprise OAuth2 Secret* field. -. For more information on completing the mapping fields, see xref:ref-controller-organization-mapping[Organization mapping] and xref:ref-controller-team-mapping[Team mapping]. -. Click btn:[Save]. - -.Verification -To verify that the authentication was configured correctly, logout of {ControllerName}. -The login screen displays the GitHub Enterprise logo to enable logging in with those credentials. - -image:configure-controller-auth-github-ent-logo.png[image] +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +. Click btn:[Next]. + +include::snippets/snip-gw-authentication-verification.adoc[] + +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] diff --git a/downstream/modules/platform/proc-controller-github-enterprise-team-settings.adoc b/downstream/modules/platform/proc-controller-github-enterprise-team-settings.adoc index 01af48b9b1..89b9e05a00 100644 --- a/downstream/modules/platform/proc-controller-github-enterprise-team-settings.adoc +++ b/downstream/modules/platform/proc-controller-github-enterprise-team-settings.adoc @@ -1,41 +1,41 @@ [id="proc-controller-github-enterprise-team-settings"] -= GitHub Enterprise Team settings += Configuring GitHub enterprise team authentication -To set up social authentication for a GitHub Enterprise team, you must obtain a GitHub Enterprise Organization URL, an Organization API URL, an Organization OAuth2 key and secret for a web application. +To set up social authentication for a GitHub enterprise team, you must obtain a GitHub Enterprise Organization URL, an Organization API URL, and an Organization OAuth2 key and secret for a web application. -To obtain the URLs, refer to the GitHub documentation on link:https://docs.github.com/en/enterprise-server@3.1/rest/reference/enterprise-admin[GitHub Enterprise administration]. +To obtain the URLs, refer to the link:https://docs.github.com/en/enterprise-server@3.1/rest/reference/enterprise-admin[GitHub Enterprise administration documentation]. -To obtain the key and secret, you must first register your enterprise team-owned application at `https://github.com/organizations//settings/applications`. +To obtain the key and secret, you must first register your enterprise organization-owned application at `https://github.com/organizations/<yourorg>/settings/applications`. -To register the application, you must supply it with your Authorization callback URL, which is the *Callback URL* shown in the *Details* page. -Because it is hosted on site and not github.com, you must specify which authentication adapter it communicates with.
+The OAuth2 key (Client ID) and secret (Client Secret) are used to supply the required fields in the UI. To register the application, you must supply it with your webpage URL, which is the *Callback URL* shown in the Authenticator details for your authenticator configuration. See xref:gw-display-auth-details[Displaying authenticator details] for instructions on accessing this information. Each key and secret must belong to a unique application and cannot be shared or reused between different authentication backends. .Procedure -. Find the numeric team ID using the link:https://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/[GitHub API]. -The Team ID will be used to supply a required field in the UI. -. From the navigation panel, select {MenuAEAdminSettings}. -. On the *Settings* page, select *GitHub settings* from the list of *Authentication* options. -. Click the *GitHub Enterprise Team* tab. + +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *GitHub enterprise team* from the *Authentication type* list and click btn:[Next]. +. Enter a *Name* for this authentication configuration. +. When the application is registered, GitHub displays the *Client ID* and *Client Secret*: ++ +.. Copy and paste the GitHub Client ID into the *GitHub OAuth2 Key* field. +.. Copy and paste the GitHub Client Secret into the *GitHub OAuth2 Secret* field. ++ +. In the *Base URL* field, enter the hostname of the GitHub Enterprise instance, for example, `https://github.example.com`. +. In the *Github OAuth2 Enterprise API URL* field, enter the API URL of the GitHub Enterprise instance, for example, `https://github.example.com/api/v3`. +. Enter your numeric GitHub team ID in the *GitHub OAuth2 Team ID* field. ++ +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] + -The *GitHub Enterprise Team OAuth2 Callback URL* field is already pre-populated and non-editable. -When the application is registered, GitHub displays the Client ID and Client Secret. - -. Click btn:[Edit] to configure GitHub Enterprise Team settings. -. In the *GitHub Enterprise Team URL* field, enter the hostname of the GitHub Enterprise team instance, for example, https://github.teamexample.com. -. In the *GitHub Enterprise Team API URL* field, enter the API URL of the GitHub Enterprise team instance, for example, -https://github.teamexample.com/api/v3. -. Copy and paste GitHub's Client ID into the *GitHub Enterprise Team OAuth2 Key* field. -. Copy and paste GitHub's Client Secret into the *GitHub Enterprise Team OAuth2 Secret* field. -. Copy and paste GitHub's team ID in the *GitHub Enterprise Team ID* field. -. For more information on completing the mapping fields, see xref:ref-controller-organization-mapping[Organization mapping] and xref:ref-controller-team-mapping[Team mapping]. -. Click btn:[Save]. - -.Verification -To verify that the authentication was configured correctly, logout of {ControllerName}. -The login screen displays the GitHub Enterprise Teams logo to enable logging in with those credentials. - -image:configure-controller-auth-github-ent-teams-logo.png[image] +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +. Click btn:[Next].
+ +include::snippets/snip-gw-authentication-verification.adoc[] + +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] diff --git a/downstream/modules/platform/proc-controller-github-organization-settings.adoc b/downstream/modules/platform/proc-controller-github-organization-settings.adoc new file mode 100644 index 0000000000..e0ede5de12 --- /dev/null +++ b/downstream/modules/platform/proc-controller-github-organization-settings.adoc @@ -0,0 +1,37 @@ +[id="proc-controller-github-organization-settings"] + += Configuring GitHub organization authentication + +When defining account authentication with either an organization or a team within an organization, you should use the specific organization and team settings. Account authentication can be limited by an organization and by a team within an organization. +You can also choose to permit all by specifying non-organization or non-team based settings. +You can limit the users who can log in to the platform to only those in an organization or on a team within an organization. + +To set up social authentication for a GitHub organization, you must obtain an OAuth2 key and secret for a web application using the instructions provided in link:https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app[registering the new application with GitHub]. + +The OAuth2 key (Client ID) and secret (Client Secret) are used to supply the required fields in the UI. To register the application, you must supply it with your webpage URL, which is the *Callback URL* shown in the Authenticator details for your authenticator configuration. See xref:gw-display-auth-details[Displaying authenticator details] for instructions on accessing this information. + +.Procedure +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *GitHub organization* from the *Authentication type* list and click btn:[Next]. +. Enter a *Name* for this authentication configuration. +. When the application is registered, GitHub displays the *Client ID* and *Client Secret*: ++ +.. Copy and paste the GitHub Client ID into the *GitHub OAuth2 Key* field. +.. Copy and paste the GitHub Client Secret into the *GitHub OAuth2 Secret* field. ++ +. Enter the name of your GitHub organization, as used in your organization’s URL, for example, `https://github.com/<yourorg>/` in the *GitHub OAuth Organization Name* field. ++ +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] ++ +. Enter the authorization scope for users in the *GitHub OAuth2 Scope* field. The default is `read:org`. ++ +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +. Click btn:[Next]. + +include::snippets/snip-gw-authentication-verification.adoc[] + +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] diff --git a/downstream/modules/platform/proc-controller-github-organization-setttings.adoc b/downstream/modules/platform/proc-controller-github-organization-setttings.adoc deleted file mode 100644 index 3800c32e70..0000000000 --- a/downstream/modules/platform/proc-controller-github-organization-setttings.adoc +++ /dev/null @@ -1,37 +0,0 @@ -[id="proc-controller-github-organization-setttings"] - -= GitHub Organization settings - -When defining account authentication with either an organization or a team within an organization, you should use the specific organization and team settings. 
-Account authentication can be limited by an organization and by a team within an organization. - -You can also choose to permit all by specifying non-organization or non-team based settings. - -You can limit users who can login to the controller by limiting only those in an organization or on a team within an organization. - -To set up social authentication for a GitHub Organization, you must obtain an OAuth2 key and secret for a web application. To do this, you must first register your organization-owned application at \https://github.com/organizations//settings/applications. - -To register the application, you must supply it with your Authorization callback URL, which is the *Callback URL* shown in the *Details* page. -Each key and secret must belong to a unique application and cannot be shared or reused between different authentication backends. -The OAuth2 key (Client ID) and secret (Client Secret) are used to supply the required fields in the UI. - -.Procedure -. From the navigation panel, select {MenuAEAdminSettings}. -. On the *Settings* page, select *GitHub settings* from the list of *Authentication* options. -. Select the *GitHub Organization* tab. -+ -The *GitHub Organization OAuth2 Callback URL* field is already pre-populated and non-editable. -+ -When the application is registered, GitHub displays the Client ID and Client Secret. - -. Click btn:[Edit] and copy and paste GitHub's Client ID into the *GitHub Organization OAuth2 Key* field. -. Copy and paste GitHub's Client Secret into the *GitHub Organization OAuth2 Secret* field. -. Enter the name of your GitHub organization, as used in your organization's URL, for example, \https://github.com// in the *GitHub Organization Name* field. -. For more information on completing the mapping fields, see xref:ref-controller-organization-mapping[Organization mapping] and xref:ref-controller-team-mapping[Team mapping]. -. Click btn:[Save]. - -.Verification -To verify that the authentication was configured correctly, logout of {ControllerName}. -The login screen displays the GitHub Organization logo to enable logging in with those credentials. - -image:configure-controller-auth-github-orgs-logo.png[image] diff --git a/downstream/modules/platform/proc-controller-github-settings.adoc b/downstream/modules/platform/proc-controller-github-settings.adoc index 06682efff7..8f2d1aadd0 100644 --- a/downstream/modules/platform/proc-controller-github-settings.adoc +++ b/downstream/modules/platform/proc-controller-github-settings.adoc @@ -1,28 +1,30 @@ [id="proc-controller-github-settings"] -= GitHub settings += Configuring GitHub authentication -To set up social authentication for GitHub, you must obtain an OAuth2 key and secret for a web application. -To do this, you must first register the new application with GitHub at https://github.com/settings/developers. +You can connect GitHub identities to {PlatformNameShort} using OAuth. To set up GitHub authentication, you need to obtain an OAuth2 key and secret by registering your organization-owned application with GitHub, using the instructions provided in link:https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app[registering the new application with GitHub]. -To register the application, you must supply it with your homepage URL, which is the *Callback URL* shown in the *Details* tab of the *GitHub default settings* page. -The OAuth2 key (Client ID) and secret (Client Secret) are used to supply the required fields in the UI. 
+The OAuth2 key (Client ID) and secret (Client Secret) are used to supply the required fields in the UI. To register the application, you must supply it with your webpage URL, which is the *Callback URL* shown in the Authenticator details for your authenticator configuration. See xref:gw-display-auth-details[Displaying authenticator details] for instructions on accessing this information. .Procedure -. From the navigation panel, select {MenuAEAdminSettings}. -. On the *Settings* page, select *GitHub settings* from the list of *Authentication* options. -. Select the *GitHub Default* tab if not already selected. -+ -The *GitHub OAuth2 Callback URL* field is already pre-populated and non-editable. -When the application is registered, GitHub displays the Client ID and Client Secret. -. Click btn:[Edit] and copy and paste the GitHub Client ID into the *GitHub OAuth2 Key* field. -. Copy and paste the GitHub Client Secret into the *GitHub OAuth2 Secret* field. -. For more information on completing the mapping fields, see xref:ref-controller-organization-mapping[Organization mapping] and xref:ref-controller-team-mapping[Team mapping]. -. Click btn:[Save]. +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *GitHub* from the *Authentication type* list and click btn:[Next]. +. Enter a *Name* for this authentication configuration. +. When the application is registered, GitHub displays the *Client ID* and *Client Secret*: ++ +.. Copy and paste the GitHub Client ID into the *GitHub OAuth2 Key* field. +.. Copy and paste the GitHub Client Secret into the *GitHub OAuth2 Secret* field. ++ +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] ++ +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +. Click btn:[Next]. -.Verification -To verify that the authentication was configured correctly, logout of {ControllerName}. -The login screen now displays the GitHub logo to enable logging in with those credentials. +include::snippets/snip-gw-authentication-verification.adoc[] -image:configure-controller-auth-github-logo.png[image] +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] diff --git a/downstream/modules/platform/proc-controller-github-team-settings.adoc b/downstream/modules/platform/proc-controller-github-team-settings.adoc index ce1dc5f830..981c60dc68 100644 --- a/downstream/modules/platform/proc-controller-github-team-settings.adoc +++ b/downstream/modules/platform/proc-controller-github-team-settings.adoc @@ -1,32 +1,35 @@ [id="proc-controller-github-team-settings"] -= GitHub Team settings += Configuring GitHub team authentication -To set up social authentication for a GitHub Team, you must obtain an OAuth2 key and secret for a web application. -To do this, you must first register your team-owned application at `https://github.com/organizations//settings/applications`. -To register the application, you must supply it with your Authorization callback URL, which is the *Callback URL* shown in the *Details* page. -Each key and secret must belong to a unique application and cannot be shared or reused between different authentication -backends. -The OAuth2 key (Client ID) and secret (Client Secret) are used to supply the required fields in the UI. 
+To set up social authentication for a GitHub team, you must obtain an OAuth2 key and secret for a web application using the instructions provided in link:https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app[registering the new application with GitHub]. + +The OAuth2 key (Client ID) and secret (Client Secret) are used to supply the required fields in the UI. To register the application, you must supply it with your webpage URL, which is the *Callback URL* shown in the Authenticator details for your authenticator configuration. See xref:gw-display-auth-details[Displaying authenticator details] for instructions on accessing this information. + +Each key and secret must belong to a unique application and cannot be shared or reused between different authentication backends. .Procedure -. Find the numeric team ID using the link:https://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/[GitHub API]. -The Team ID is used to supply a required field in the UI. -. From the navigation panel, select {MenuAEAdminSettings}. -. On the *Settings* page, select *GitHub settings* from the list of *Authentication* options. -. Click the *GitHub Team* tab. -+ -The *GitHub Team OAuth2 Callback URL* field is already pre-populated and non-editable. -When the application is registered, GitHub displays the Client ID and Client Secret. -. Click btn:[Edit] and copy and paste GitHub's Client ID into the *GitHub Team OAuth2 Key* field. -. Copy and paste GitHub's Client Secret into the *GitHub Team OAuth2 Secret* field. -. Copy and paste GitHub's team ID in the *GitHub Team ID* field. -. For more information on completing the mapping fields, see xref:ref-controller-organization-mapping[Organization mapping] and xref:ref-controller-team-mapping[Team mapping]. -. Click btn:[Save] +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *GitHub team* from the *Authentication type* list and click btn:[Next]. +. Enter a *Name* for this authentication configuration. +. When the application is registered, GitHub displays the *Client ID* and *Client Secret*: ++ +.. Copy and paste the GitHub Client ID into the *GitHub OAuth2 Key* field. +.. Copy and paste the GitHub Client Secret into the *GitHub OAuth2 Secret* field. ++ +. Copy and paste GitHub’s team ID into the *GitHub OAuth2 Team ID* field. +. Enter the authorization scope for users in the *GitHub OAuth2 Scope* field. The default is `read:org`. ++ +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] ++ +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +. Click btn:[Next]. -.Verification -To verify that the authentication was configured correctly, logout of {ControllerName}. -The login screen displays the GitHub Team logo to enable logging in with those credentials. 
+include::snippets/snip-gw-authentication-verification.adoc[] -image:configure-controller-auth-github-teams-logo.png[image] +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] diff --git a/downstream/modules/platform/proc-controller-google-oauth2-settings.adoc b/downstream/modules/platform/proc-controller-google-oauth2-settings.adoc index b6af819555..5787a9e34d 100644 --- a/downstream/modules/platform/proc-controller-google-oauth2-settings.adoc +++ b/downstream/modules/platform/proc-controller-google-oauth2-settings.adoc @@ -1,36 +1,45 @@ +:_mod-docs-content-type: PROCEDURE + [id="proc-controller-google-oauth2-settings"] -= Google OAuth2 settings += Configuring Google OAuth2 authentication -To set up social authentication for Google, you must obtain an OAuth2 key and secret for a web application. -To do this, you must first create a project and set it up with Google. +To set up social authentication for Google, you must obtain an OAuth2 key and secret for a web application. To do this, you must first create a project and set it up with Google. For instructions, see link:https://support.google.com/googleapi/answer/6158849[Setting up OAuth 2.0] in the Google API Console Help documentation. -If you have already completed the setup process, you can access those credentials by going to the Credentials section of the -link:https://console.developers.google.com/[Google API Manager Console]. -The OAuth2 key (Client ID) and secret (Client secret) are used to supply the required fields in the UI. +If you have already completed the setup process, you can access those credentials by going to the Credentials section of the link:https://console.cloud.google.com/projectselector2/apis/dashboard?pli=1&supportedpurview=project[Google API Manager Console]. The OAuth2 key (Client ID) and secret (Client secret) are used to supply the required fields in the UI. .Procedure -. From the navigation panel, select {MenuAEAdminSettings}. -. On the *Settings* page, select *Google OAuth 2 settings* from the list of *Authentication* options. -+ -The *Google OAuth2 Callback URL* field is already pre-populated and non-editable. - -. The following fields are also pre-populated. -If not, use the credentials Google supplied during the web application setup process, and look for the values with the same format as the ones shown in the example below: -* Click *Edit* and copy and paste Google's Client ID into the *Google OAuth2 Key* field. -* Copy and paste Google's Client secret into the *Google OAuth2 Secret* field. +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *Google OAuth* from the *Authentication type* list and click btn:[Next]. +. Enter a *Name* for this authentication setting. +. The *Google OAuth2 Key* and *Google OAuth2 Secret* fields are pre-populated. + -image:configure-controller-auth-google.png[image] - -. To complete the remaining optional fields, refer to the tooltips in each of the fields for instructions and required format. -. For more information on completing the mapping fields, see xref:ref-controller-organization-mapping[Organization mapping] and xref:ref-controller-team-mapping[Team mapping]. -. Click btn:[Save]. +If not, use the credentials Google supplied during the web application setup process. Save these settings for use in the following steps. ++ +. Copy and paste Google’s Client ID into the *Google OAuth2 Key* field. +. 
Copy and paste Google’s Client secret into the *Google OAuth2 Secret* field. +. Optional: Enter information for the following fields using the tooltips provided for instructions and required format: ++ +* *Access Token URL* +* *Access Token Method* +* *Authorization URL* +* *Revoke Token Method* +* *Revoke Token URL* +* *OIDC JWT Algorithm(s)* +* *OIDC JWT* ++ +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] ++ +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +Click btn:[Next]. -.Verification -To verify that the authentication was configured correctly, logout of {ControllerName}. -The login screen displays the Google logo to indicate it as an alternate method of logging into {ControllerName}. +include::snippets/snip-gw-authentication-verification.adoc[] -image:configure-controller-auth-google-logo.png[image] +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] diff --git a/downstream/modules/platform/proc-controller-importing-subscriptions.adoc b/downstream/modules/platform/proc-controller-importing-subscriptions.adoc index fbf13c0e34..5f4fc8c333 100644 --- a/downstream/modules/platform/proc-controller-importing-subscriptions.adoc +++ b/downstream/modules/platform/proc-controller-importing-subscriptions.adoc @@ -3,6 +3,18 @@ = Importing a subscription After you have obtained an authorized {PlatformNameShort} subscription, you must import it into the {ControllerName} system before you can use {ControllerName}. + +[NOTE] +==== +You are opted in to {Analytics} by default when you activate {ControllerName} on first login. This helps Red Hat improve the product by delivering a better user experience. You can opt out by doing the following: +. From the navigation panel, select menu:Settings[] and select the *Miscellaneous System settings* option. +. Click btn:[Edit]. +. Toggle the *Gather data for Automation Analytics* switch to the off position. +. Click btn:[Save]. +For opt-in of {Analytics} to be effective, your instance of {ControllerName} must be running on {RHEL}. +For more information, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#ref-controller-automation-analytics[{Analytics}] section. +==== + .Prerequisites * You have obtained a subscriptions manifest. @@ -34,25 +46,26 @@ After you enter your credentials, click btn:[Get Subscriptions]. Then, it prompts you to select the subscription that you want to run and applies that metadata to {ControllerName}. You can log in over time and retrieve new subscriptions if you have renewed. + -. Click btn:[Next] to proceed to the *Tracking and Insights* page. -+ -Tracking and insights collect data to help Red Hat improve the product and deliver a better user experience. -For more information about data collection, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#controller-usability-analytics-data-collection[Usability Analytics and Data Collection] of the _{ControllerAG}_. -+ -This option is checked by default, but you can opt out of any of the following: +. Click btn:[Next] to proceed to the End User Agreement. +//[ddacosta - removed analytics selection for AAP-30863 and AAP-29909] to proceed to the *Tracking and Insights* page. //+ //Tracking and insights collect data to help Red Hat improve the product and deliver a better user experience. 
+//For more information about data collection, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#controller-usability-analytics-data-collection[Usability Analytics and Data Collection] of the _{ControllerAG}_. +//+ +//This option is checked by default, but you can opt out of any of the following: //* *User analytics*. Collects data from the controller UI. -* *Insights Analytics*. Provides a high level analysis of your automation with {ControllerName}. -It helps you to identify trends and anomalous use of the controller. -For opt-in of {Analytics} to be effective, your instance of {ControllerName} must be running on {RHEL}. -For more information, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#ref-controller-automation-analytics[{Analytics}] section. -+ -[NOTE] -==== -You can change your analytics data collection preferences at any time. -==== -+ -. After you have specified your tracking and Insights preferences, click btn:[Next] to proceed to the End User Agreement. +//* *Insights Analytics*. Provides a high level analysis of your automation with {ControllerName}. +//It helps you to identify trends and anomalous use of the controller. +//For opt-in of {Analytics} to be effective, your instance of {ControllerName} must be running on {RHEL}. +//For more information, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/configuring_automation_execution/index#ref-controller-automation-analytics[{Analytics}] section of _{ControllerAG}_. +//+ +//[NOTE] +//==== +//You can change your analytics data collection preferences at any time. +//==== +//+ +//. After you have specified your tracking and Insights preferences, click btn:[Next] . Review and check the *I agree to the End User License Agreement* checkbox and click btn:[Submit]. + After your subscription is accepted, {ControllerName} displays the subscription details and opens the Dashboard. diff --git a/downstream/modules/platform/proc-controller-ingress-options.adoc b/downstream/modules/platform/proc-controller-ingress-options.adoc index ba83b5b6f8..72b47ccdb9 100644 --- a/downstream/modules/platform/proc-controller-ingress-options.adoc +++ b/downstream/modules/platform/proc-controller-ingress-options.adoc @@ -1,12 +1,18 @@ [id="proc-controller-ingress-options_{context}"] -= Configuring the Ingress type for your {ControllerName} operator += Configuring the ingress type for your {ControllerName} operator -The {PlatformName} operator installation form allows you to further configure your {ControllerName} operator Ingress under *Advanced configuration*. +The {OperatorPlatformNameShort} installation form allows you to further configure your {ControllerName} operator ingress under *Advanced configuration*. .Procedure -. Click btn:[Advanced Configuration]. +. Log in to {OCP}. +. Navigate to menu:Operators[Installed Operators]. +. Select your {OperatorPlatformNameShort} deployment. +. Select the *Automation Controller* tab. +. For new instances, click btn:[Create AutomationController]. +.. For existing instances, you can edit the YAML view by clicking the {MoreActionsIcon} icon and then btn:[Edit AutomationController]. +. Click btn:[Advanced configuration]. . Under *Ingress type*, click the drop-down menu and select *Ingress*. . Under *Ingress annotations*, enter any annotations to add to the ingress. . 
Under *Ingress TLS secret*, click the drop-down menu and select a secret from the list. @@ -18,20 +24,27 @@ You can view the progress by navigating to menu:Workloads[Pods] and locating the .Verification Verify that the following operator pods provided by the {PlatformNameShort} Operator installation from {ControllerName} are running: -[cols="a,a,a"] +[cols="a,a,a,a"] |=== -| Operator manager controllers | {ControllerName} |{HubName} +| Operator manager controllers | {ControllerNameStart} |{HubNameStart} |{EDAName} (EDA) -| The operator manager controllers for each of the 3 operators, include the following: +| The operator manager controllers for each of the operators include the following: * automation-controller-operator-controller-manager * automation-hub-operator-controller-manager * resource-operator-controller-manager -| After deploying {ControllerName}, you will see the addition of these pods: +* aap-gateway-operator-controller-manager +* ansible-lightspeed-operator-controller-manager +* eda-server-operator-controller-manager + +| After deploying {ControllerName}, you can see the addition of the following pods: * controller * controller-postgres -| After deploying {HubName}, you will see the addition of these pods: +* controller-web +* controller-task + +| After deploying {HubName}, you can see the addition of the following pods: * hub-api * hub-content @@ -39,6 +52,14 @@ Verify that the following operator pods provided by the {PlatformNameShort} Oper * hub-redis * hub-worker +| After deploying EDA, you can see the addition of the following pods: + +* eda-activation-worker +* eda-api +* eda-default-worker +* eda-event-stream +* eda-scheduler + |=== [NOTE] diff --git a/downstream/modules/platform/proc-controller-inv-source-aap.adoc b/downstream/modules/platform/proc-controller-inv-source-aap.adoc index 29570a1240..cb35a9ee6a 100644 --- a/downstream/modules/platform/proc-controller-inv-source-aap.adoc +++ b/downstream/modules/platform/proc-controller-inv-source-aap.adoc @@ -7,11 +7,13 @@ Use the following procedure to configure an {ControllerName}-sourced inventory. .Procedure . From the navigation panel, select {MenuInfrastructureInventories}. . Select the inventory name you want a source to and click the *Sources* tab. -. Click btn:[Add source]. -. In the *Add new source* page, select *{PlatformName}* from the *Source* list. -. The *Add new source* window expands with the required *Credential* field. -Choose from an existing {PlatformName} Credential. -For more information, see xref:controller-credentials[Credentials]. +. Click btn:[Create source]. +. In the *Create source* page, select *{PlatformName}* from the *Source* list. +. The *Create source* window expands with additional fields. +Enter the following details: + +* Optional: *Credential*: Choose from an existing {PlatformName} Credential. +For more information, see xref:controller-credentials[Managing user credentials]. . Optional: You can specify the verbosity, host filter, enabled variables or values, and update options as described in xref:proc-controller-add-source[Adding a source]. . Use the *Source Variables* field to override variables used by the `controller` inventory plugin. Enter variables by using either JSON or YAML syntax. 
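The ingress options described in the ingress procedure above can also be set directly on the AutomationController custom resource when you edit its YAML view. The following is a minimal sketch, assuming the operator's `ingress_type`, `ingress_annotations`, and `ingress_tls_secret` spec fields; the metadata values, annotation, and secret name are placeholders:

----
apiVersion: automationcontroller.ansible.com/v1beta1
kind: AutomationController
metadata:
  name: controller   # placeholder instance name
  namespace: aap     # placeholder namespace
spec:
  # Serve the instance through an Ingress instead of the default Route
  ingress_type: ingress
  # Annotations copied onto the generated Ingress object
  ingress_annotations: |
    environment: testing
  # Name of an existing TLS secret for the Ingress (placeholder)
  ingress_tls_secret: controller-tls
----

Applying this with `oc apply -f` should have the same effect as selecting *Ingress* under *Ingress type* in the form view.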
diff --git a/downstream/modules/platform/proc-controller-inv-source-gce.adoc b/downstream/modules/platform/proc-controller-inv-source-gce.adoc index 0bdb877a69..f244728a99 100644 --- a/downstream/modules/platform/proc-controller-inv-source-gce.adoc +++ b/downstream/modules/platform/proc-controller-inv-source-gce.adoc @@ -7,11 +7,11 @@ Use the following procedure to configure a Google-sourced inventory: .Procedure . From the navigation panel, select {MenuInfrastructureInventories}. . Select the inventory name you want a source to and click the *Sources* tab. -. Click btn:[Add source] +. Click btn:[Create source]. . In the *Add new source* page, select *Google Compute Engine* from the *Source* list. -. The *Add new source* window expands with the required *Credential* field. +. The *Create source* window expands with the required *Credential* field. Choose from an existing GCE Credential. -For more information, see xref:controller-credentials[Credentials]. +For more information, see xref:controller-credentials[Managing user credentials]. //+ //image:inventories-create-source-GCE-example.png[Inventories- create source - GCE example] diff --git a/downstream/modules/platform/proc-controller-inv-source-insights.adoc b/downstream/modules/platform/proc-controller-inv-source-insights.adoc index b765ced2fa..056e07a81f 100644 --- a/downstream/modules/platform/proc-controller-inv-source-insights.adoc +++ b/downstream/modules/platform/proc-controller-inv-source-insights.adoc @@ -7,11 +7,13 @@ Use the following procedure to configure a Red Hat Insights-sourced inventory. .Procedure . From the navigation panel, select {MenuInfrastructureInventories}. . Select the inventory name you want a source to and click the *Sources* tab. -. Click btn:[Add source]. -. In the *Add new source* page, select *Red Hat Insights* from the *Source* list. -. The *Add new source* window expands with the required *Credential* field. -Choose from an existing Red Hat Insights Credential. -For more information, see xref:controller-credentials[Credentials]. +. Click btn:[Create source]. +. In the *Create source* page, select *Red Hat Insights* from the *Source* list. +. The *Create source* window expands with additional fields. +Enter the following details: + +* Optional: *Credential*: Choose from an existing Red Hat Insights Credential. +For more information, see xref:controller-credentials[Managing user credentials]. . Optional: You can specify the verbosity, host filter, enabled variables or values, and update options as described in xref:proc-controller-add-source[Adding a source]. . Use the *Source Variables* field to override variables used by the `insights` inventory plugin. Enter variables by using either JSON or YAML syntax. diff --git a/downstream/modules/platform/proc-controller-inv-source-open-shift-virt.adoc b/downstream/modules/platform/proc-controller-inv-source-open-shift-virt.adoc new file mode 100644 index 0000000000..92413f0e3c --- /dev/null +++ b/downstream/modules/platform/proc-controller-inv-source-open-shift-virt.adoc @@ -0,0 +1,34 @@ +[id="proc-controller-inv-source-open-shift-virt"] + += OpenShift Virtualization + +This inventory source uses a cluster that is able to deploy Red Hat OpenShift Container Platform Virtualization. +To configure a Red Hat OpenShift Container Platform Virtualization inventory source, you need a virtual machine deployed in a specific namespace and an OpenShift or Kubernetes API Bearer Token credential. + +.Procedure + +. From the navigation panel, select {MenuInfrastructureInventories}. +. 
Select the inventory that you want to add a source to. +. In the *Sources* tab, click btn:[Create source]. +. From the menu:Source[] menu, select *OpenShift Virtualization*. +* The *Create source* window expands with the required *Credential* field. ++ +Choose from an existing Kubernetes API Bearer Token credential. +For more information, see xref:ref-controller-credential-openShift[OpenShift or Kubernetes API Bearer Token credential type]. +In this example, the `cmv2.engineering.redhat.com` credential is used. +. You can optionally specify the *Verbosity*, *Host Filter*, *Enabled Variable/Value*, and *Update options* as described in the xref:proc-controller-add-source[Adding a source] steps. +. Use the *Source Variables* field to override variables used by the `kubernetes` inventory plugin. +Enter variables by using either JSON or YAML syntax. +Use the radio button to toggle between the two. +For more information about these variables, see the link:https://kubevirt.io/kubevirt.core/main/plugins/kubevirt.html#parameters[kubevirt.core.kubevirt inventory source] documentation. ++ +In the following example, the connections variable is used to specify access to a particular namespace in a cluster: ++ +---- +--- +connections: +- namespaces: + - hao-test +---- ++ +. Click btn:[Save] and then click btn:[Sync] to sync the inventory. diff --git a/downstream/modules/platform/proc-controller-inv-source-openstack.adoc b/downstream/modules/platform/proc-controller-inv-source-openstack.adoc index 5c3631a256..1c780fdaba 100644 --- a/downstream/modules/platform/proc-controller-inv-source-openstack.adoc +++ b/downstream/modules/platform/proc-controller-inv-source-openstack.adoc @@ -7,11 +7,13 @@ Use the following procedure to configure an OpenStack-sourced inventory. .Procedure . From the navigation panel, select {MenuInfrastructureInventories}. . Select the inventory name you want a source to and click the *Sources* tab. -. Click btn:[Add source]. -. In the *Add new source* page, select *OpenStack* from the *Source* list. -. The *Add new Source* window expands with the required *Credential* field. -Choose from an existing OpenStack Credential. -For more information, see xref:controller-credentials[Credentials]. +. Click btn:[Create source]. +. In the *Create source* page, select *OpenStack* from the *Source* list. +. The *Create source* window expands with additional fields. +Enter the following details: + +* Optional: *Credential*: Choose from an existing OpenStack Credential. +For more information, see xref:controller-credentials[Managing user credentials]. . Optional: You can specify the verbosity, host filter, enabled variables or values, and update options as described in xref:proc-controller-add-source[Adding a source]. . Use the *Source Variables* field to override variables used by the `openstack` inventory plugin. Enter variables by using either JSON or YAML syntax. diff --git a/downstream/modules/platform/proc-controller-inv-source-rh-virt.adoc b/downstream/modules/platform/proc-controller-inv-source-rh-virt.adoc index 64d52524c1..1deb8c4c4e 100644 --- a/downstream/modules/platform/proc-controller-inv-source-rh-virt.adoc +++ b/downstream/modules/platform/proc-controller-inv-source-rh-virt.adoc @@ -7,11 +7,13 @@ Use the following procedure to configure a Red Hat virtualization-sourced invent .Procedure . From the navigation panel, select {MenuInfrastructureInventories}. . Select the inventory name you want a source to and click the *Sources* tab. -. Click btn:[Add source]. -. 
In the *Add new source* page, select *Red Hat Virtualization* from the *Source* list. -. The *Add new source* window expands with the required *Credential* field. -Choose from an existing Red Hat Virtualization Credential. -For more information, see xref:controller-credentials[Credentials]. +. Click btn:[Create source]. +. In the *Create source* page, select *Red Hat Virtualization* from the *Source* list. +. The *Create source* window expands with additional fields. +Enter the following details: + +* Optional: *Credential*: Choose from an existing Red Hat Virtualization Credential. +For more information, see xref:controller-credentials[Managing user credentials]. . Optional: You can specify the verbosity, host filter, enabled variables or values, and update options as described in xref:proc-controller-add-source[Adding a source]. . Use the *Source Variables* field to override variables used by the `ovirt` inventory plugin. Enter variables by using either JSON or YAML syntax. diff --git a/downstream/modules/platform/proc-controller-inv-source-satellite.adoc b/downstream/modules/platform/proc-controller-inv-source-satellite.adoc index 80f15beb13..1502aaf5d2 100644 --- a/downstream/modules/platform/proc-controller-inv-source-satellite.adoc +++ b/downstream/modules/platform/proc-controller-inv-source-satellite.adoc @@ -7,11 +7,13 @@ Use the following procedure to configure a Red Hat Satellite-sourced inventory. .Procedure . From the navigation panel, select {MenuInfrastructureInventories}. . Select the inventory name you want a source to and click the *Sources* tab. -. Click btn:[Add source]. -. In the *Add new source* page, select *Red Hat Satellite 6* from the *Source* list. -. The *Add new source* window expands with the required *Credential* field. -Choose from an existing Satellite Credential. -For more information, see xref:controller-credentials[Credentials]. +. Click btn:[Create source]. +. In the *Create source* page, select *Red Hat Satellite 6* from the *Source* list. +. The *Create source* window expands with additional fields. +Enter the following details: + +* Optional: *Credential*: Choose from an existing Satellite Credential. +For more information, see xref:controller-credentials[Managing user credentials]. . Optional: You can specify the verbosity, host filter, enabled variables or values, and update options as described in xref:proc-controller-add-source[Adding a source]. . Use the *Source Variables* field to specify parameters used by the `foreman` inventory source. Enter variables by using either JSON or YAML syntax. diff --git a/downstream/modules/platform/proc-controller-inv-source-terraform.adoc b/downstream/modules/platform/proc-controller-inv-source-terraform.adoc index 547d423f15..6aace1b2ae 100644 --- a/downstream/modules/platform/proc-controller-inv-source-terraform.adoc +++ b/downstream/modules/platform/proc-controller-inv-source-terraform.adoc @@ -14,11 +14,11 @@ The plugin parses a terraform state file and add hosts for AWS EC2, GCE, and {Azure}. ** Enter the appropriate details according to the steps in xref:proc-controller-adding-a-project[Adding a new project]. . From the navigational panel, select {MenuInfrastructureInventories}. . Select the inventory that you want to add a source to. -. In the *Sources* tab, click btn:[Add source]. +. In the *Sources* tab, click btn:[Create source]. . From the menu:Source[] menu, select *Terraform State*. -* The *Add new source* window expands with the required *Credential* field. 
+* The *Create source* window expands with the optional *Credential* field. + -Choose from an existing Terraform backend configuration credential. For more information, see xref:ref-controller-credential-terraform[Terraform backend configuration]. +Choose an existing Terraform backend configuration credential. For more information, see xref:ref-controller-credential-terraform[Terraform backend configuration]. . Enable the options to *Overwrite* and *Update on Launch*. . Use the *Source Variables* field to override variables used by the `terraform_state` inventory plugin. Enter variables by using either JSON or YAML syntax. diff --git a/downstream/modules/platform/proc-controller-inv-source-vm-vcenter.adoc b/downstream/modules/platform/proc-controller-inv-source-vm-vcenter.adoc index e78f29c5d7..ec547f4c28 100644 --- a/downstream/modules/platform/proc-controller-inv-source-vm-vcenter.adoc +++ b/downstream/modules/platform/proc-controller-inv-source-vm-vcenter.adoc @@ -7,11 +7,13 @@ Use the following procedure to configure a VMWare-sourced inventory. .Procedure . From the navigation panel, select {MenuInfrastructureInventories}. . Select the inventory name you want a source to and click the *Sources* tab. -. Click btn:[Add source]. -. In the *Add new source* page, select *VMware vCenter* from the *Source* list. -. The *Add new source* window expands with the required *Credential* field. -Choose from an existing VMware Credential. -For more information, see xref:controller-credentials[Credentials]. +. Click btn:[Create source]. +. In the *Create source* page, select *VMware vCenter* from the *Source* list. +. The *Create source* window expands with additional fields. +Enter the following details: + +* Optional: *Credential*: Choose from an existing VMware credential. +For more information, see xref:controller-credentials[Managing user credentials]. . Optional: You can specify the verbosity, host filter, enabled variables or values, and update options as described in xref:proc-controller-add-source[Adding a source]. . Use the *Source Variables* field to override variables used by the `vmware_inventory` inventory plugin. @@ -23,7 +25,7 @@ For more information about these variables, see the link:https://github.com/ansi ==== VMWare properties have changed from lower case to camel case. {ControllerNameStart} provides aliases for the top-level keys, but lower case keys in nested properties have been discontinued. -For a list of valid and supported properties, see link:https://docs.ansible.com/ansible/latest/collections/community/vmware/docsite/vmware_scenarios/vmware_inventory_vm_attributes.html[Using Virtual machine attributes in VMware dynamic inventory plugin]. +For a list of valid and supported properties, see link:https://docs.ansible.com/ansible/4/scenario_guides/vmware_scenarios/vmware_inventory_vm_attributes.html[Using Virtual machine attributes in VMware dynamic inventory plugin]. ==== //image:inventories-create-source-vmware-example.png[Inventories- create source - VMWare example] diff --git a/downstream/modules/platform/proc-controller-launch-job-template.adoc b/downstream/modules/platform/proc-controller-launch-job-template.adoc index 41b9bb2615..0ab06fa148 100644 --- a/downstream/modules/platform/proc-controller-launch-job-template.adoc +++ b/downstream/modules/platform/proc-controller-launch-job-template.adoc @@ -40,7 +40,7 @@ Ensure that you complete the tabs in the order that the prompts appear. 
When launching, {ControllerName} automatically redirects the web browser to the *Job Status* page for this job under the *Jobs* tab. You can re-launch the most recent job from the list view to re-run on all hosts or just failed hosts in the specified inventory. -For more information, see the xref:controller-jobs[Jobs] section. +For more information, see the xref:controller-jobs[Jobs in automation controller] section. When slice jobs are running, job lists display the workflow and job slices, and a link to view their details individually. diff --git a/downstream/modules/platform/proc-controller-management-notifications.adoc b/downstream/modules/platform/proc-controller-management-notifications.adoc index 77f5daf727..d4c3e6fe94 100644 --- a/downstream/modules/platform/proc-controller-management-notifications.adoc +++ b/downstream/modules/platform/proc-controller-management-notifications.adoc @@ -9,7 +9,7 @@ Use the following procedure to review or set notifications associated with a man //image:management-job-notifications.png[Notifications] -If none exist, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-notifications#controller-create-notification-template[Creating a notification template] in the _{ControllerUG}_. +If none exist, see link:{URLControllerUserGuide}/controller-notifications#controller-create-notification-template[Creating a notification template] in _{ControllerUG}_. image:management-job-notifications-empty.png[No notifications set] diff --git a/downstream/modules/platform/proc-controller-obtaining-subscriptions.adoc b/downstream/modules/platform/proc-controller-obtaining-subscriptions.adoc index e7d3ac4f82..de661f0fc1 100644 --- a/downstream/modules/platform/proc-controller-obtaining-subscriptions.adoc +++ b/downstream/modules/platform/proc-controller-obtaining-subscriptions.adoc @@ -20,7 +20,7 @@ endif::controller-GS,controller-AG[] ** Enter your username and password on the license page. ** Obtain a subscriptions manifest from the link:https://access.redhat.com/management/subscription_allocations[Subscription Allocations] page on the Red Hat Customer Portal. ifdef::controller-GS,controller-AG[] -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#proc-controller-obtaining-subscriptions-manifest[Obtaining a subscriptions manifest] in the _{ControllerUG}_. +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#proc-controller-obtaining-subscriptions-manifest[Obtaining a subscriptions manifest] in _{ControllerUG}_. endif::controller-GS,controller-AG[] ifdef::controller-UG[] For more information, see xref:proc-controller-obtaining-subscriptions-manifest[Obtaining a subscriptions manifest]. 
diff --git a/downstream/modules/platform/proc-controller-pass-extra-variables-provisioning-callbacks.adoc b/downstream/modules/platform/proc-controller-pass-extra-variables-provisioning-callbacks.adoc index a67a91385b..84c2be73e7 100644 --- a/downstream/modules/platform/proc-controller-pass-extra-variables-provisioning-callbacks.adoc +++ b/downstream/modules/platform/proc-controller-pass-extra-variables-provisioning-callbacks.adoc @@ -22,4 +22,4 @@ root@localhost:~$ curl -f -H 'Content-Type: application/json' -XPOST \ https:///api/v2/job_templates/7/callback ---- -For more information, see link:https://docs.ansible.com/automation-controller/4.4/html/administration/tipsandtricks.html#launch-jobs-curl[Launching Jobs with Curl] in the _{ControllerAG}_. \ No newline at end of file +For more information, see link:{URLControllerAdminGuide}/controller-tips-and-tricks#ref-controller-launch-jobs-with-curl[Launching Jobs with Curl] in _{ControllerAG}_. \ No newline at end of file diff --git a/downstream/modules/platform/proc-controller-remediate-insights-inventory.adoc b/downstream/modules/platform/proc-controller-remediate-insights-inventory.adoc index bb3b50abb3..5bb95c2212 100644 --- a/downstream/modules/platform/proc-controller-remediate-insights-inventory.adoc +++ b/downstream/modules/platform/proc-controller-remediate-insights-inventory.adoc @@ -24,7 +24,7 @@ The credential does not have to be a Red Hat Insights credential. + image::ug-insights-create-job-template.png[Insights job template] + -. Click btn:[Save]. +. Click btn:[Create job template]. . Click the launch image:rightrocket.png[Launch,15,15] icon to launch the job template. When complete, the job results in the *Job Details* page. diff --git a/downstream/modules/platform/proc-controller-reset-tower-base.adoc b/downstream/modules/platform/proc-controller-reset-tower-base.adoc index 4f7d249732..f011000e58 100644 --- a/downstream/modules/platform/proc-controller-reset-tower-base.adoc +++ b/downstream/modules/platform/proc-controller-reset-tower-base.adoc @@ -14,6 +14,6 @@ Use the following procedure to reset `TOWER_URL_BASE` if the wrong address has b . From the navigation panel, select menu:{MenuAEAdminSettings}[System]. . Click btn:[Edit]. -. Enter the address in the *Base URL of the service* field for the DNS entry you wish to appear in notifications. +. Enter the address in the *Base URL of the service* field for the DNS entry you want to appear in notifications. //[ddacosta] Subscription is not an option from the Settings menu in the controller test environment. Need to verify where this lives and if it changes for 2.5 //. Re-add your license in menu:Settings[Subscription settings]. diff --git a/downstream/modules/platform/proc-controller-review-organizations.adoc b/downstream/modules/platform/proc-controller-review-organizations.adoc index 06c5f47376..3d023bb6c9 100644 --- a/downstream/modules/platform/proc-controller-review-organizations.adoc +++ b/downstream/modules/platform/proc-controller-review-organizations.adoc @@ -1,23 +1,15 @@ +:_mod-docs-content-type: PROCEDURE + [id="controller-review-organizations"] -= Reviewing the organization += Organizations list view -The Organizations page displays the existing organizations for your installation. +The *Organizations* page displays the existing organizations for your installation. From here, you can search for a specific organization, filter the list of organizations, or change the sort order for the list. 
.Procedure
-* From the navigation panel, select {MenuControllerOrganizations}.
-+
-[NOTE]
-====
-{ControllerNameStart} automatically creates a default organization.
-If you have a Self-support level license, you have only the default organization available and must not delete it.
-====
-You can use the default organization as it is initially set up and edit it later.
-+
-[NOTE]
-====
-Only Enterprise or Premium licenses can add new organizations.
-====
-
-Enterprise and Premium license users who want to add a new organization should see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#assembly-controller-organizations[Organizations] section in the _{ControllerUG}_.
+. From the navigation panel, select {MenuAMOrganizations}.
+. In the Search bar, enter an appropriate keyword for the organization you want to search for and click the arrow icon.
+. From the menu bar, you can sort the list of organizations by *Name*, using the arrows to toggle between ascending and descending order.
+. You can also sort the list by selecting *Name*, *Created*, or *Last modified* from the *Sort* list.
+. You can view organization details by clicking an organization *Name* on the *Organizations* page.
diff --git a/downstream/modules/platform/proc-controller-run-ad-hoc-commands.adoc b/downstream/modules/platform/proc-controller-run-ad-hoc-commands.adoc
index 9c1f4bbb8a..b7f2f0c910 100644
--- a/downstream/modules/platform/proc-controller-run-ad-hoc-commands.adoc
+++ b/downstream/modules/platform/proc-controller-run-ad-hoc-commands.adoc
@@ -39,7 +39,7 @@ The Run command window opens.
 To target all hosts in the inventory enter `all` or `*`, or leave the field blank.
This is automatically populated with whatever was selected in the previous view before clicking the launch button.
 * *Machine Credential*: Select the credential to use when accessing the remote hosts to run the command.
-Choose the credential containing the username and SSH key or password that Ansible needs to log into the remote hosts.
+Choose the credential containing the username and SSH key or password that Ansible needs to log in to the remote hosts.
 * *Verbosity*: Select a verbosity level for the standard output.
 * *Forks*: If needed, select the number of parallel or simultaneous processes to use while executing the command.
 * *Show Changes*: Select to enable the display of Ansible changes in the
diff --git a/downstream/modules/platform/proc-controller-run-job-template.adoc b/downstream/modules/platform/proc-controller-run-job-template.adoc
index e91e5173d8..3c9b992825 100644
--- a/downstream/modules/platform/proc-controller-run-job-template.adoc
+++ b/downstream/modules/platform/proc-controller-run-job-template.adoc
@@ -2,8 +2,8 @@

= Running a job template

-A benefit of {ControllerName} is the push-button deployment of Ansible playbooks.
-You can configure a template to store all the parameters that you would normally pass to the Ansible playbook on the command line.
+A benefit of {ControllerName} is the push-button deployment of Ansible Playbooks.
+You can configure a template to store all the parameters that you would normally pass to the Ansible Playbook on the command line.
 In addition to the playbooks, the template passes the inventory, credentials, extra variables, and all options and settings that you can specify on the command line.
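+For illustration, a single job template can take the place of a manual command-line run such as the following sketch, where the playbook name, inventory path, and variable values are placeholder examples:
+
+-----
+# Everything on this command line can be captured once in a job template:
+ansible-playbook deploy-app.yml \
+  -i inventory/production \
+  --extra-vars "app_version=1.2.3" \
+  --forks 10
+-----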
.Procedure
@@ -14,7 +14,7 @@
image::controller-gs-job-templates-launch.png[Launch template]
 The initial job start generates a status page, which updates automatically by using {ControllerName}'s Live Event feature, until the job is complete.
-For more information about the job results, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-jobs[Jobs in automation controller] in the _{ControllerUG}_.
+For more information about the job results, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-jobs[Jobs in automation controller] in _{ControllerUG}_.
 .Additional resources
diff --git a/downstream/modules/platform/proc-controller-select-capacity.adoc b/downstream/modules/platform/proc-controller-select-capacity.adoc
index ec32dbbb35..bf0d8ba7d3 100644
--- a/downstream/modules/platform/proc-controller-select-capacity.adoc
+++ b/downstream/modules/platform/proc-controller-select-capacity.adoc
@@ -21,8 +21,9 @@ A value of 0.5 is a 50/50 balance between the two algorithms, which is 18:
 View or edit the capacity:
-. From the *Instances Groups* list view, select the desired instance.
-. Select the *Instances* tab and adjust the *Capacity Adjustment* slider.
+. From the navigation panel, select {MenuInfrastructureInstanceGroups}.
+. On the *Instance Groups* list view, select the required instance.
+. Select the *Instances* tab and adjust the *Capacity adjustment* slider.
 +
 [NOTE]
 ====
diff --git a/downstream/modules/platform/proc-controller-set-up-LDAP.adoc b/downstream/modules/platform/proc-controller-set-up-LDAP.adoc
index 079666dfbb..5e27935f04 100644
--- a/downstream/modules/platform/proc-controller-set-up-LDAP.adoc
+++ b/downstream/modules/platform/proc-controller-set-up-LDAP.adoc
@@ -1,176 +1,81 @@
+:_mod-docs-content-type: PROCEDURE
+
 [id="controller-set-up-LDAP"]
-= Setting up LDAP authentication
+= Configuring LDAP authentication
-When configured, a user who logs in with an LDAP username and password automatically has an {ControllerName} account created for them.
-They can be automatically placed into organizations as either regular users or organization administrators.
+As a platform administrator, you can configure LDAP as the source for account authentication information for {PlatformNameShort} users.
-Users created in the user interface (Local) take precedence over those logging into {ControllerName} for their first time with an alternative authentication solution.
-You must delete the local user if you want to re-use with another authentication method, such as LDAP.
+[NOTE]
+====
+If the LDAP server you want to connect to has a certificate that is self-signed or signed by a corporate internal certificate authority (CA), the CA certificate must be added to the system's trusted CAs. Otherwise, connection to the LDAP server results in an error that the certificate issuer is not recognized.
+====
-Users created through an LDAP login cannot change their username, given name, surname, or set a local password for themselves.
-You can also configure this to restrict editing of other field names.
+When LDAP is configured, an account is created for any user who logs in with an LDAP username and password, and they can be automatically placed into organizations as either regular users or organization administrators.
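+Before you configure LDAP authentication, you can confirm that the LDAP server is reachable, and that your bind credentials and CA trust are correct, by using the `ldapsearch` utility from the `openldap-clients` package. The following check is a sketch only; the server URI, bind DN, and search base are placeholder values:
+
+-----
+# A successful search confirms connectivity and bind credentials
+# before you configure the authenticator in the platform.
+ldapsearch -x -H ldaps://ldap.example.com \
+  -D "CN=josie,CN=users,DC=example,DC=com" \
+  -b "DC=example,DC=com" -w <bind-password>
+-----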
-[NOTE]
+Users created through an LDAP login should not change their username, first name, last name, or set a local password for themselves. Any changes made to this information are overwritten the next time the user logs in to the platform.
+
+[IMPORTANT]
 ====
-If the LDAP server you want to connect to has a certificate that is self-signed or signed by a corporate internal certificate authority (CA),
-you must add the CA certificate to the system's trusted CAs.
-Otherwise, connection to the LDAP server results in an error that the certificate issuer is not recognized.
-For more information, see xref:controller-import-CA-cert-LDAP[Importing a certificate authority in {ControllerName} for LDAPS integration].
-If prompted, use your Red Hat customer credentials to login.
+Migration of LDAP authentication settings is not supported from 2.4 to 2.5 in the platform UI. If you are upgrading from {PlatformNameShort} 2.4 to 2.5, be sure to save your authentication provider data before upgrading.
 ====

.Procedure
-. Create a user in LDAP that has access to read the entire LDAP structure.
-. Use the `ldapsearch` command to test if you can make successful queries to the LDAP server.
-You can install this tool from {ControllerName}'s system command line, and by using other Linux and OSX systems.
-+
-.Example
+. From the navigation panel, select {MenuAMAuthentication}.
+. Click btn:[Create authentication].
+. Select *LDAP* from the *Authentication type* list and click btn:[Next].
+. Enter a *Name* for this LDAP configuration. The configuration name is required, must be unique across all authenticators, and must not be longer than 512 characters.
+. In the *LDAP Server URI* field, enter or modify the list of LDAP servers to which you want to connect. This field supports multiple addresses.
+. In the *LDAP Bind DN* text field, enter the Distinguished Name (DN) to specify the user that {PlatformNameShort} uses to connect to the LDAP server. For example:
+
-[literal, options="nowrap" subs="+attributes"]
----
-ldapsearch -x -H ldap://win -D "CN=josie,CN=Users,DC=website,DC=com" -b "dc=website,dc=com" -w Josie4Cloud
+CN=josie,CN=users,DC=website,DC=com
----
-In this example, `CN=josie,CN=users,DC=website,DC=com` is the distinguished name of the connecting user.
-+
-[NOTE]
-====
-The `ldapsearch` utility is not automatically pre-installed with {ControllerName}.
-However, you can install it from the `openldap-clients` package.
-====
-+
-. From the navigation panel, select {MenuAEAdminSettings} in the {ControllerName} UI.
-. Select *LDAP settings* in the list of *Authentication* options.
-+
-You do not need multiple LDAP configurations per LDAP server, but you can configure many LDAP servers from this page, otherwise, leave the server at *Default*.
+
-The equivalent API endpoints show `AUTH_LDAP_*` repeated: `AUTH_LDAP_1_*`, `AUTH_LDAP_2_*`, `AUTH_LDAP_5_*` to denote server designations.
-. To enter or change the LDAP server address, click btn:[Edit] and enter in the *LDAP Server URI* field by using the same format as the one pre-populated in the text field.
+. In the *LDAP Bind Password* text field, enter the password to use for the binding user.
+. Select a group type from the *LDAP Group Type* list.
The *LDAP Group Type* list includes the following groups:
++
+* `PosixGroupType`
+* `GroupOfNamesType`
+* `GroupOfUniqueNamesType`
+* `ActiveDirectoryGroupType`
+* `OrganizationalRoleGroupType`
+* `MemberDNGroupType`
+* `NISGroupType`
+* `NestedGroupOfNamesType`
+* `NestedGroupOfUniqueNamesType`
+* `NestedActiveDirectoryGroupType`
+* `NestedOrganizationalRoleGroupType`
+* `NestedMemberDNGroupType`
+* `PosixUIDGroupType`
+
[NOTE]
====
-You can specify multiple LDAP servers by separating each with spaces or commas. Click the image:question_circle.png[Tooltip,12,12] icon to comply with the correct syntax and rules.
+The group types that are supported by {PlatformNameShort} use the underlying link:https://django-auth-ldap.readthedocs.io/en/latest/reference.html#django_auth_ldap.config.LDAPGroupType[django-auth-ldap library]. To specify the parameters for the selected group type, see Step 13 of this procedure.
====
+. To use *LDAP User DN Template* as an alternative to user search, enter the name of the template. If it is usable in your organizational environment, this approach is more efficient for user lookups than searching. If this setting has a value, it is used instead of the *LDAP User Search* setting.
+. *LDAP Start TLS* is disabled by default. To enable TLS when the LDAP connection is not using SSL, set the switch to *On*.
+
-. Enter the password to use for the binding user in the *LDAP Bind Password* text field.
-For more information about LDAP variables, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/appendix-inventory-files-vars#ref-hub-variables[Ansible automation hub variables].
-. Click to select a group type from the *LDAP Group Type* list.
-+
-The LDAP group types that are supported by {ControllerName} use the underlying link:https://django-auth-ldap.readthedocs.io/en/latest/groups.html#types-of-groups[django-auth-ldap library].
-To specify the parameters for the selected group type, see Step 15.
-. The *LDAP Start TLS* is disabled by default.
-To enable TLS when the LDAP connection is not using SSL/TLS, set the toggle to *On*.
-. Enter the distinguished name in the *LDAP Bind DN* text field to specify the user that {ControllerName} uses to connect (Bind) to the LDAP server.
-* If that name is stored in key `sAMAccountName`, the *LDAP User DN Template* is populated from `(sAMAccountName=%(user)s)`.
-Active Directory stores the username to `sAMAccountName`.
-For OpenLDAP, the key is `uid` and the line becomes `(uid=%(user)s)`.
-. Enter the distinguished group name to enable users within that group to access {ControllerName} in the *LDAP Require Group* field, using the same format as the one shown in the text field, `CN=controller Users,OU=Users,DC=website,DC=com`.
-. Enter the distinguished group name to prevent users within that group from accessing {ControllerName} in the *LDAP Deny Group* field, using the same format as the one shown in the text field.
-. Enter where to search for users while authenticating in the *LDAP User Search* field by using the same format as the one shown in the text field.
-In this example, use:
-+
-[literal, options="nowrap" subs="+attributes"]
----
-[
-"OU=Users,DC=website,DC=com",
-"SCOPE_SUBTREE",
-"(cn=%(user)s)"
-]
----
-+
-The first line specifies where to search for users in the LDAP tree.
-In the earlier example, the users are searched recursively starting from `DC=website,DC=com`.
-+
-The second line specifies the scope where the users should be searched:
-+
-* *SCOPE_BASE*: Use this value to indicate searching only the entry at the base DN, resulting in only that entry being returned.
-* *SCOPE_ONELEVEL*: Use this value to indicate searching all entries one level under the base DN, but not including the base DN and not including any entries under that one level under the base DN.
-* *SCOPE_SUBTREE*: Use this value to indicate searching of all entries at all levels under and including the specified base DN.
+include::snippets/snip-gw-authentication-additional-auth-fields.adoc[]
+
-The third line specifies the key name where the user name is stored.
+. Enter any *LDAP Connection Options* to set for the LDAP connection.
+. Depending on the selected *LDAP Group Type*, different parameters are available in the *LDAP Group Type Parameters* field. `LDAP_GROUP_TYPE_PARAMS` is a dictionary, which is converted to `kwargs` and passed to the selected *LDAP Group Type* class. There are two common parameters used by group types, `name_attr` and `member_attr`, where `name_attr` defaults to `cn` and `member_attr` defaults to `member`:
+
-For many search queries, use the following correct syntax:
-+
-[literal, options="nowrap" subs="+attributes"]
----
-[
-  [
-  "OU=Users,DC=northamerica,DC=acme,DC=com",
-  "SCOPE_SUBTREE",
-  "(sAMAccountName=%(user)s)"
-  ],
-  [
-  "OU=Users,DC=apac,DC=corp,DC=com",
-  "SCOPE_SUBTREE",
-  "(sAMAccountName=%(user)s)"
-  ],
-  [
-  "OU=Users,DC=emea,DC=corp,DC=com",
-  "SCOPE_SUBTREE",
-  "(sAMAccountName=%(user)s)"
-  ]
-]
----
-+
-. In the *LDAP Group Search* text field, specify which groups to search and how to search them. In this example, use:
-+
-[literal, options="nowrap" subs="+attributes"]
----
-[
-"dc=example,dc=com",
-"SCOPE_SUBTREE",
-"(objectClass=group)"
- ]
----
-+
-* The first line specifies the BASE DN where the groups should be searched.
-* The second line specifies the scope and is the same as that for the user directive.
-* The third line specifies what the `objectClass` of a group object is in the LDAP that you are using.
-+
-. Enter the user attributes in the *LDAP User Attribute Map* the text field.
-In this example, use:
-+
-[literal, options="nowrap" subs="+attributes"]
----
-{
-"first_name": "givenName",
-"last_name": "sn",
-"email": "mail"
-}
----
-+
-The earlier example retrieves users by surname from the key `sn`.
-You can use the same LDAP query for the user to decide what keys they are stored under.
-+
-Depending on the selected *LDAP Group Type*, different parameters are available in the *LDAP Group Type Parameters* field to account for this.
-`LDAP_GROUP_TYPE_PARAMS` is a dictionary that is converted by {ControllerName} to `kwargs` and passed to the *LDAP Group Type* class selected.
-There are two common parameters used by any of the *LDAP Group Type*; `name_attr` and `member_attr`.
-Where `name_attr defaults` to cn and `member_attr` defaults to member:
-+
-[literal, options="nowrap" subs="+attributes"]
----
{"name_attr": "cn", "member_attr": "member"}
----
+
-To find what parameters a specific *LDAP Group Type* expects, see the link:https://django-auth-ldap.readthedocs.io/en/latest/reference.html#django_auth_ldap.config.LDAPGroupType[django_auth_ldap] documentation around the classes `init` parameters.
+To determine the parameters that a specific *LDAP Group Type* requires, refer to the link:https://django-auth-ldap.readthedocs.io/en/latest/reference.html#django_auth_ldap.config.LDAPGroupType[django_auth_ldap documentation] on the class's `init` parameters.
+
-. Enter the user profile flags in the *LDAP User Flags by Group* text field.
-The following example uses the syntax to set LDAP users as "Superusers" and "Auditors":
+. In the *LDAP Group Search* field, specify which groups should be searched and how to search them.
+. In the *LDAP User Attribute Map* field, enter user attributes to map LDAP fields to your {PlatformNameShort} users, for example, `email` or `first_name`.
+. In the *LDAP User Search* field, enter where to search for users during authentication.
+
-[literal, options="nowrap" subs="+attributes"]
----
-{
-"is_superuser": "cn=superusers,ou=groups,dc=website,dc=com",
-"is_system_auditor": "cn=auditors,ou=groups,dc=website,dc=com"
-}
----
+include::snippets/snip-gw-authentication-common-checkboxes.adoc[]
+
-. For more information about completing the mapping fields, *LDAP Organization Map* and *LDAP Team Map*, see the xref:controller-LDAP-organization-team-mapping[LDAP Organization and team mapping] section.
-. Click btn:[Save].
+. Click btn:[Next].
-[NOTE]
-====
-{ControllerNameStart} does not actively synchronize users, but they are created during their initial login.
-To improve performance associated with LDAP authentication, see xref:controller-prevent-LDAP-attributes[Preventing LDAP attributes from updating on each login].
-====
+[role="_additional-resources"]
.Next steps
include::snippets/snip-gw-authentication-next-steps.adoc[]
diff --git a/downstream/modules/platform/proc-controller-set-up-SAML.adoc b/downstream/modules/platform/proc-controller-set-up-SAML.adoc
index a20e34455d..c4fa950f51 100644
--- a/downstream/modules/platform/proc-controller-set-up-SAML.adoc
+++ b/downstream/modules/platform/proc-controller-set-up-SAML.adoc
@@ -1,79 +1,70 @@
 [id="controller-set-up-SAML"]
-= SAML authentication
+= Configuring SAML authentication
-SAML enables the exchange of authentication and authorization data between an Identity Provider (IdP - a system of servers that provide the Single Sign On service) and a service provider, in this case, {ControllerName}.
+SAML allows the exchange of authentication and authorization data between an Identity Provider (IdP) and a Service Provider (SP).
+{PlatformNameShort} is a SAML SP that you can configure to talk with one or more SAML IdPs to authenticate users.
-You can configure {ControllerName} to communicate with SAML to authenticate (create/login/logout) {ControllerName} users.
-You can embed User, Team, and Organization membership in the SAML response to {ControllerName}.
+Depending on the groups and attributes optionally provided by the SAML IdP, users can be placed into teams and organizations in {PlatformNameShort} according to the authenticator maps tied to this authenticator. This mapping ensures that when a user logs in through SAML, {PlatformNameShort} can correctly identify the user and assign the proper attributes such as first name, last name, email, and group membership.
-image::ag-configure-auth-saml-topology.png[SAML topology]
+.Prerequisites
+
+Before you configure SAML authentication in {PlatformNameShort}, be sure you do the following:
+
+* Configure a SAML Identity Provider (IdP).
+* Pre-configure the SAML IdP with the settings required for integration with {PlatformNameShort}.
For example, in Microsoft Entra ID you can configure the following:
+** *Identifier (Entity ID):* This can be any value that you want, but it needs to match the one configured in your {PlatformNameShort}.
+** *Reply URL (Assertion Consumer Service (ACS) URL):* This URL is auto-generated when the SAML method is configured in {PlatformNameShort}. That value must be copied from {PlatformNameShort} and pasted in your IdP settings.
+* Gather the user attributes for your SAML IdP application. Different IdPs might use different attribute names and formats. Refer to the documentation for your specific IdP for the exact attribute names and the expected values.
+* Generate a private key and public certificate using the following command:
++
+-----
+$ openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 3650 -nodes
+-----
-The following instructions describe {ControllerName} as the service provider.
-To authenticate users through RHSSO (keycloak), see link:https://www.ansible.com/blog/red-hat-single-sign-on-integration-with-ansible-tower[Red Hat Single Sign On Integration with the Automation Controller].

.Procedure
-. From the navigation panel, select {MenuAEAdminSettings}.
-. Select *SAML settings* from the list of *Authentication* options.
+. From the navigation panel, select {MenuAMAuthentication}.
+. Click btn:[Create authentication].
+. Select *SAML* from the *Authentication type* list and click btn:[Next].
+. Enter a *Name* for this SAML configuration.
+. Enter the application-defined unique identifier used as the audience of the SAML service provider configuration in the *SAML Service Provider Entity ID* field. This is usually the base URL of your service provider, but the actual value depends on the Entity ID expected by your IdP.
+. Include the certificate content in the *SAML Service Provider Public Certificate* field. This information is contained in the `cert.pem` file you created as a prerequisite and must include the `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` lines.
+. Include the private key content in the *SAML Service Provider Private Key* field. This information is contained in the `key.pem` file you created as a prerequisite and must include the `-----BEGIN PRIVATE KEY-----` and `-----END PRIVATE KEY-----` lines.
+. Enter the URL to redirect the user to for login initiation in the *IdP Login URL* field. This is the login URL from your SAML IdP application.
+. Enter the public certificate used for secrets coming from the IdP in the *IdP Public Cert* field. This is the SAML certificate available for download from the IdP.
+
[NOTE]
====
-The *SAML Assertion Consume Service (ACS) URL* and *SAML Service Provider Metadata URL* fields are pre-populated and are non-editable. Contact the IdP administrator and provide the information contained in these fields.
+The *IdP Public Cert* field must contain the entire certificate, including the `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` lines. You must manually enter the prefix and suffix if the IdP does not include them.
====
-. Click btn:[Edit] and set the *SAML Service Provider Entity ID* to be the same as the *Base URL* of the {ControllerName} host field, found in the *Miscellaneous System settings* screen.
-You can view it through the API in the `/api/v2/settings/system`, under the `CONTROLLER_BASE_URL` variable.
-You can set the *Entity ID* to any one of the individual {ControllerName} cluster nodes, but it is good practice to set it to the URL of the service provider.
-Ensure that the *Base URL* matches the FQDN of the load balancer, if used.
+
+. Enter the entity ID returned in the assertion in the *Entity ID* field. This is the identifier from your IdP SAML application. You can find this value in the SAML metadata provided by your IdP.
+. Enter user details in the *Groups*, *User Email*, *Username*, *User Last Name*, *User First Name*, and *User Permanent ID* fields.
+
[NOTE]
====
-The *Base URL* is different for each node in a cluster.
-A load balancer often sits in front of {ControllerName} cluster nodes to provide a single entry point, the {ControllerName} Cluster FQDN.
-The SAML service provider must be able establish an outbound connection and route to the {ControllerName} Cluster Node or the {ControllerName} Cluster FQDN that you set in the *SAML Service Provider Entity ID*.
+Additional attributes might be available through your SAML IdP. Those values must be included in either the *Additional Authenticator Fields* or the *SAML IDP to extra_data attribute mapping* field. Refer to those steps for details.
====
+
-In the following example, the service provider is the {ControllerName} cluster, and therefore, the ID is set to the {ControllerName} Cluster FQDN:
+. The *SAML Assertion Consumer Service (ACS) URL* field registers the service as a service provider (SP) with each identity provider (IdP) you have configured. Leave this field blank. After you save this authentication method, it is auto-generated. This field must match the *Reply URL* setting in your IdP.
+. Optional: Enter any *Additional Authenticator Fields* that this authenticator can take. These fields are not validated and are passed directly back to the authenticator.
+For example, to ensure all SAML IdP attributes other than Email, Username, Last Name, and First Name are included for mapping, enter the following:
+
-image::configure-auth-saml-service-provider.png[SAML service provider]
+-----
+GET_ALL_EXTRA_DATA: true
+-----
+
-. Create a server certificate for the Ansible cluster.
-Typically when an Ansible cluster is configured, the {ControllerName} nodes are configured to handle HTTP traffic only and the load balancer is an SSL Termination Point.
-In this case, an SSL certificate is required for the load balancer, and not for the individual {ControllerName} Cluster Nodes.
-You can enable or disable SSL per individual {ControllerName} node, but you must disable it when using an SSL terminated load balancer.
-Use a non-expiring self signed certificate to avoid periodically updating certificates.
-This way, authentication does not fail in case someone forgets to update the certificate.
+Alternatively, you can include a list of SAML IdP attributes in the *SAML IDP to extra_data attribute mapping* field.
+
[NOTE]
====
-The *SAML Service Provider Public Certificate* field must contain the entire certificate, including the `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`.
+Values defined in this field override the dedicated fields provided in the UI. Any values not defined here are not provided to the authenticator.
====
+
-If you are using a CA bundle with your certificate, include the entire bundle in this field.
-+
-.Example
-+
-[literal, options="nowrap" subs="+attributes"]
----
------BEGIN CERTIFICATE-----
-... cert text ...
------END CERTIFICATE-----
----
-+
-. Create an optional private key for the controller to use as a service provider and enter it in the *SAML Service Provider Private Key* field.
-+
-.Example
+ -[literal, options="nowrap" subs="+attributes"] ----- ------BEGIN PRIVATE KEY----- -... key text ... ------END PRIVATE KEY----- ----- -+ -. Provide the IdP with details about the {ControllerName} cluster during the SSO process in the *SAML Service Provider Organization Info* field: -+ -[literal, options="nowrap" subs="+attributes"] ---- { "en-US": { @@ -84,15 +75,8 @@ If you are using a CA bundle with your certificate, include the entire bundle in } ---- + -[IMPORTANT] -==== -You must complete these fields to configure SAML correctly within {ControllerName}. -==== +. In the *SAML Service Provider Technical Contact* field, give the name and email address of the technical contact for your service provider. + -. Provide the IdP with the technical contact information in the *SAML Service Provider Technical Contact* field. -Do not remove the contents of this field: -+ -[literal, options="nowrap" subs="+attributes"] ---- { "givenName": "Some User", @@ -100,285 +84,68 @@ Do not remove the contents of this field: } ---- + -. Provide the IdP with the support contact information in the *SAML Service Provider Support Contact* field. -Do not remove the contents of this field: +. In the *SAML Service Provider Support Contact* field, give the name and email address of the support contact for your service provider. + -[literal, options="nowrap" subs="+attributes"] ----- +---- { "givenName": "Some User", "emailAddress": "suser@example.com" } ---- + -. In the *SAML Enabled Identity Providers* field, provide information on how to connect to each IdP listed. -The following example shows what {ControllerName} expects SAML attributes to be: -+ -[literal, options="nowrap" subs="+attributes"] ----- -Username(urn:oid:0.9.2342.19200300.100.1.1) -Email(urn:oid:0.9.2342.19200300.100.1.3) -FirstName(urn:oid:2.5.4.42) -LastName(urn:oid:2.5.4.4) ----- -+ -If these attributes are not known, map existing SAML attributes to `Username`, `Email`, `FirstName`, and `LastName`. -+ -Configure the required keys for each IdP: -+ -* `attr_user_permanent_id` - The unique identifier for the user. -It can be configured to match any of the attributes sent from the IdP. -It is normally set to `name_id` if the `SAML:nameid` attribute is sent to the {ControllerName} node. -It can be the username attribute or a custom unique identifier. -* `entity_id` - The Entity ID provided by the IdP administrator. -The administrator creates a SAML profile for {ControllerName} and it generates a unique URL. -* `url`- The Single Sign On (SSO) URL that {ControllerName} redirects the user to, when SSO is activated. -* `x509_cert` - The certificate provided by the IdP administrator that is generated from the SAML profile created on the IdP. -Remove the `---BEGIN CERTIFICATE---` and `---END CERTIFICATE---` headers, then enter the certificate as one non-breaking string. -+ -Multiple SAML IdPs are supported. -Some IdPs might provide user data using attribute names that differ from the default OIDs. -The SAML NameID is a special attribute used by some IdPs to tell the service provider (the {ControllerName} cluster) what the unique user identifier is. -If it is used, set the `attr_user_permanent_id` to `name_id` as shown in the following example. 
-Other attribute names can be overridden for each IdP:
-+
-[literal, options="nowrap" subs="+attributes"]
----
-"myidp": {
-  "entity_id": "https://idp.example.com",
-  "url": "https://myidp.example.com/sso",
-  "x509cert": ""
-},
-"onelogin": {
-  "entity_id": "https://app.onelogin.com/saml/metadata/123456",
-  "url": "https://example.onelogin.com/trust/saml2/http-post/sso/123456",
-"x509cert": "",
-  "attr_user_permanent_id": "name_id",
-  "attr_first_name": "User.FirstName",
-  "attr_last_name": "User.LastName",
-  "attr_username": "User.email",
-  "attr_email": "User.email"
-  }
-}
----
-+
-[WARNING]
-====
-Do not create a SAML user that shares the same email with another user (including a non-SAML user).
-Doing so results in the accounts being merged.
-Note that this same behavior exists for system administrators.
-Therefore, a SAML login with the same email address as the system administrator can login with system administrator privileges.
-To avoid this, you can remove (or add) administrator privileges based on SAML mappings.
-====
-+
-. Optional: Provide the *SAML Organization Map*.
-For more information, see xref:ref-controller-organization-mapping[Organization mapping] and xref:ref-controller-team-mapping[Team mapping].
-. You can configure {ControllerName} to look for particular attributes that contain Team and Organization membership to associate with users when they log into {ControllerName}.
-The attribute names are defined in the *SAML Organization Attribute Mapping* and the *SAML Team Attribute Mapping* fields.
-+
-.Example SAML Organization Attribute Mapping
-+
-The following is an example SAML attribute that embeds user organization membership in the attribute `member-of`:
-+
-[literal, options="nowrap" subs="+attributes"]
----
-<saml2:AttributeStatement>
-    <saml2:Attribute Name="member-of">
-        <saml2:AttributeValue>Engineering</saml2:AttributeValue>
-        <saml2:AttributeValue>IT</saml2:AttributeValue>
-        <saml2:AttributeValue>HR</saml2:AttributeValue>
-        <saml2:AttributeValue>Sales</saml2:AttributeValue>
-    </saml2:Attribute>
-    <saml2:Attribute Name="admin-of">
-        <saml2:AttributeValue>Engineering</saml2:AttributeValue>
-    </saml2:Attribute>
-</saml2:AttributeStatement>
----
-+
-The following is the corresponding {ControllerName} configuration:
-+
-[literal, options="nowrap" subs="+attributes"]
----
-{
-  "saml_attr": "member-of",
-  "saml_admin_attr": "admin-of",
-  "remove": true,
-  "remove_admins": false
-}
----
-+
-* `saml_attr`: The SAML attribute name where the organization array can be found and `remove` is set to `true` to remove a user from all organizations before adding the user to the list of organizations.
-To keep the user in the organizations they are in while adding the user to the organizations in the SAML attribute, set `remove` to `false`.
-* `saml_admin_attr`: Similar to the `saml_attr` attribute, but instead of conveying organization membership, this attribute conveys administrator organization permissions.
-+
-.Example SAML Team Attribute Mapping
-+
-The following example is another SAML attribute that contains a team membership in a list:
+. Optional: Provide extra configuration data in the *SAML Service Provider extra configuration data* field. For example, you can choose to enable signing requests for added security:
+
-[literal, options="nowrap" subs="+attributes"]
----
-<saml2:AttributeStatement>
-    <saml2:Attribute Name="eduPersonAffiliation">
-        <saml2:AttributeValue>member</saml2:AttributeValue>
-        <saml2:AttributeValue>staff</saml2:AttributeValue>
-    </saml2:Attribute>
-</saml2:AttributeStatement>
+-----
{
- "saml_attr": "eduPersonAffiliation",
- "remove": true,
- "team_org_map": [
-   {
-     "team": "member",
-     "organization": "Default1"
-   },
-   {
-     "team": "staff",
-     "organization": "Default2"
-   }
- ]
+"sign_request": true
}
----
+-----
+
-* `saml_attr`: The SAML attribute name where the team array can be found.
-* `remove`: Set `remove` to `true` to remove the user from all teams before adding the user to the list of teams.
-To keep the user in the teams they are in while adding the user to the teams in the SAML attribute, set `remove` to `false`.
-* `team_org_map`: An array of dictionaries of the form `{ "team": "", "organization": "" }` that defines mapping from controller Team -> {ControllerName} organization.
-You need this because the same named team can exist in multiple organizations in {ControllerName}.
-The organization to which a team listed in a SAML attribute belongs to is ambiguous without this mapping.
+This field is equivalent to the `SOCIAL_AUTH_SAML_SP_EXTRA` setting in the API. For more information, see link:https://github.com/SAML-Toolkits/python-saml#settings[OneLogin's SAML Python Toolkit] to learn about the valid service provider extra (SP_EXTRA) parameters.
+. Optional: Provide security settings in the *SAML Security Config* field. This field is equivalent to the `SOCIAL_AUTH_SAML_SECURITY_CONFIG` setting in the API.
+
-You can create an alias to override both teams and organizations in the *SAML Team Attribute Mapping* field.
-This option is useful in cases when the SAML backend sends out complex group names, as shown in the following example:
-+
-[literal, options="nowrap" subs="+attributes"]
----
-{
-  "remove": false,
-  "team_org_map": [
-    {
-      "team": "internal:unix:domain:admins",
-      "organization": "Default",
-      "team_alias": "Administrators"
-    },
-    {
-      "team": "Domain Users",
-      "organization_alias": "OrgAlias",
-      "organization": "Default"
-    }
-  ],
-  "saml_attr": "member-of"
-}
+// Indicates whether the <samlp:AuthnRequest> messages sent by this SP will be signed. [Metadata of the SP will offer this info]
+"authnRequestsSigned": false,
+
+// Indicates a requirement for the <samlp:Response>, <samlp:LogoutRequest> and <samlp:LogoutResponse> elements received by this SP to be signed.
+"wantMessagesSigned": false,
+
+// Indicates a requirement for the <saml:Assertion> elements received by this SP to be signed. [Metadata of the SP will offer this info]
+"wantAssertionsSigned": false,
+
+// Authentication context.
+// Set to false and no AuthContext will be sent in the AuthNRequest,
+// Set true or don't present this parameter and you will get an AuthContext 'exact' 'urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport'
+// Set an array with the possible auth context values: array ('urn:oasis:names:tc:SAML:2.0:ac:classes:Password', 'urn:oasis:names:tc:SAML:2.0:ac:classes:X509'),
+"requestedAuthnContext": true,
----
+For more information and additional options, see link:https://github.com/SAML-Toolkits/python-saml#settings[OneLogin's SAML Python Toolkit].
+
-Once the user authenticates, {ControllerName} creates organization and team aliases.
-+
-. Optional: Provide team membership mapping in the *SAML Team Map* field.
-For more information, see xref:ref-controller-organization-mapping[Organization mapping] and xref:ref-controller-team-mapping[Team Mapping].
-. Optional: Provide security settings in the *SAML Security Config* field.
-This field is the equivalent to the `SOCIAL_AUTH_SAML_SECURITY_CONFIG` field in the API.
-For more information, see link:https://github.com/SAML-Toolkits/python-saml#settings[OneLogin's SAML Python Toolkit].
-+
-{ControllerNameStart} uses the `python-social-auth` library when users log in through SAML.
-This library relies on the `python-saml` library to make the settings available for the next two optional fields, *SAML Service Provider extra configuration data* and *SAML IDP to extra_data attribute mapping*.
+. Optional: In the *SAML IDP to extra_data attribute mapping* field, enter values to map IDP attributes to extra_data attributes.
These values include additional user information, beyond standard attributes such as Email or Username, to be mapped. For example:
+
-* The *SAML Service Provider extra configuration data* field is equivalent to the `SOCIAL_AUTH_SAML_SP_EXTRA` in the API.
-For more information, see link:https://github.com/SAML-Toolkits/python-saml#settings[OneLogin's SAML Python Toolkit] to learn about the valid service provider extra (`SP_EXTRA`) parameters.
-* The *SAML IDP to extra_data attribute mapping* field is equivalent to the `SOCIAL_AUTH_SAML_EXTRA_DATA` in the API.
-For more information, see Python's SAML link:https://python-social-auth.readthedocs.io/en/latest/backends/saml.html#advanced-settings[Advanced Settings] documentation.
-* The *SAML User Flags Attribute Mapping* field enables you to map SAML roles and attributes to special user flags.
-The following attributes are valid in this field:
-** `is_superuser_role`: Specifies one or more SAML roles which grants a user the superuser flag.
-** `is_superuser_attr`: Specifies a SAML attribute which grants a user the superuser flag.
-** `is_superuser_value`: Specifies one or more values required for `is_superuser_attr` that is required for the user to be a superuser.
-** `remove_superusers`: Boolean indicating if the superuser flag should be removed for users or not.
-This defaults to `true`.
-** `is_system_auditor_role`: Specifies one or more SAML roles which will grant a user the system auditor flag.
-** `is_system_auditor_attr`: Specifies a SAML attribute which will grant a user the system auditor flag.
-** `is_system_auditor_value`: Specifies one or more values required for `is_system_auditor_attr` that is required for the user to be a system auditor.
-** `remove_system_auditors`: Boolean indicating if the `system_auditor` flag should be removed for users or not.
-This defaults to `true`.
+-----
+- Department
+- UserType
+- Organization
+-----
+
-The `role` and `value` fields are lists and are 'OR' logic.
-If you specify two roles: [ "Role 1", "Role 2" ] and the SAML user has either role, the logic considers them to have the required role for the flag.
-This is the same with the `value` field, if you specify: [ "Value 1", "Value 2"] and the SAML user has either value for their attribute the logic considers their attribute value to have matched.
+For more information on the values you can include, see link:https://python-social-auth.readthedocs.io/en/latest/backends/saml.html#advanced-settings[advanced SAML settings].
+
-If you specify `role` and `attr` for either `superuser` or `system_auditor`, the settings for `attr` take precedence over a role.
-System administrators and System auditor roles are evaluated at login for a SAML user.
-If you grant a SAML user one of these roles through the UI and not through the SAML settings, the roles are removed on the user's next login unless the `remove` flag is set to `false`.
-The `remove` flag, if `false`, never enables the SAML adapter to remove the corresponding flag from a user.
-The following table describes how the logic works: -+ -[cols="33%,33%,33%,33%,33%,33%",options="header"] -|=== -| *Has one or more roles* | *Has `attr`* | *Has one or more `attr Values`* | *Remove flag* | *Previous Flag* | *Is flagged* -| No | No | N/A | True | False | No -| No | No | N/A | False | False | No -| No | No | N/A | True | True | No -| No | No | N/A | False | True | Yes -| Yes | No | N/A | True | False | Yes -| Yes | No | N/A | False | False | Yes -| Yes | No | N/A | True | True | Yes -| Yes | No | N/A | False | False | Yes -| No | Yes | Yes | True | True | Yes -| No | Yes | Yes | True | False | Yes -| No | Yes | Yes | False | False | Yes -| No | Yes | Yes | True | True | Yes -| No | Yes | Yes | False | True | Yes -| No | Yes | No | True | False | No -| No | Yes | No | False | False | No -| No | Yes | No | True | True | No -| No | Yes | No | False | True | Yes -| No | Yes | Unset | True | False | Yes -| No | Yes | Unset | False | False | Yes -| No | Yes | Unset | True | True | Yes -| No | Yes | Unset | False | True | Yes -| Yes | Yes | Yes | True | False | Yes -| Yes | Yes | Yes | False | False | Yes -| Yes | Yes | Yes | True | True | Yes -| Yes | Yes | Yes | False | True | Yes -| Yes | Yes | No | True | False | No -| Yes | Yes | No | False | False | No -| Yes | Yes | No | True | True | No -| Yes | Yes | No | False | True | Yes -| Yes | Yes | Unset | True | False | Yes -| Yes | Yes | Unset | False | False | Yes -| Yes | Yes | Unset | True | True | Yes -| Yes | Yes | Unset | False | True | Yes -|=== -+ -Each time a SAML user authenticates to {ControllerName}, these checks are performed and the user flags are altered as needed. -If `System Administrator` or `System Auditor` is set for a SAML user within the UI, the SAML adapter overrides the UI setting based on the preceding rules. -If you prefer that the user flags for SAML users do not get removed when a SAML user logs in, you can set the `remove_` flag to `false`. -With the `remove` flag set to `false`, a user flag set to `true` through either the UI, API or SAML adapter is not removed. -However, if a user does not have the flag, and the preceding rules determine the flag should be added, it is added, even if the flag is `false`. +[IMPORTANT] +==== +Make sure you include all relevant values so that everything gets mapped correctly for your configuration. Alternatively, you can include the `GET_ALL_EXTRA_DATA: true` in the *Additional Authenticator Fields* to allow mapping of all available SAML IdP attributes. +==== + -.Example +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] + -[literal, options="nowrap" subs="+attributes"] ----- -{ - "is_superuser_attr": "blueGroups", - "is_superuser_role": ["is_superuser"], - "is_superuser_value": ["cn=My-Sys-Admins,ou=memberlist,ou=mygroups,o=myco.com"], - "is_system_auditor_attr": "blueGroups", - "is_system_auditor_role": ["is_system_auditor"], - "is_system_auditor_value": ["cn=My-Auditors,ou=memberlist,ou=mygroups,o=myco.com"] -} ----- -. Click btn:[Save]. +. Click btn:[Next]. -.Verification -To verify that the authentication is configured correctly, load the auto-generated URL found in the *SAML Service Provider Metadata URL* into a browser. -If you do not get XML output, you have not configured it correctly. 
-
-Alternatively, logout of {ControllerName} and the login screen displays the SAML logo to indicate it as a alternate method of logging into {ControllerName}:
+[IMPORTANT]
+====
+You can configure an HTTPS redirect for SAML in operator-based deployments to simplify login for your users. For the steps to configure this setting, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/installing_on_openshift_container_platform/index#proc-operator-enable-https-redirect[Enabling single sign-on (SSO) for {Gateway} on {OCPShort}].
+====
-image::ag-configure-auth-saml-logo.png[SAML logo]
+[role="_additional-resources"]
+.Next steps
+include::snippets/snip-gw-authentication-next-steps.adoc[]
diff --git a/downstream/modules/platform/proc-controller-set-up-azure.adoc b/downstream/modules/platform/proc-controller-set-up-azure.adoc
index 8a58dca61f..040b40b11e 100644
--- a/downstream/modules/platform/proc-controller-set-up-azure.adoc
+++ b/downstream/modules/platform/proc-controller-set-up-azure.adoc
@@ -1,29 +1,69 @@
 [id="controller-set-up-azure"]
+ifndef::controller-AG[]
+= Configuring {MSEntraID} authentication
+endif::[]
+ifdef::controller-AG[]
 = {Azure} active directory authentication
+endif::controller-AG[]
-To set up enterprise authentication for {Azure} Active Directory (AD), you need to obtain an OAuth2 key and secret by registering your organization-owned application from Azure at:
-https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app.
+ifndef::controller-AG[]
+To set up enterprise authentication for {MSEntraID}, formerly known as {Azure} Active Directory (AD), you need to obtain an OAuth2 key and secret by registering your organization-owned application from Azure using the link:https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app[Quickstart: Register an application with the Microsoft identity platform].
+endif::[]
+ifdef::controller-AG[]
+To set up enterprise authentication for {Azure} Active Directory (AD), you need to obtain an OAuth2 key and secret by registering your organization-owned application from Azure at: https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app[Quickstart: Register an application with the Microsoft identity platform].
+endif::[]
-Each key and secret must belong to a unique application and cannot be shared or reused between different authentication backends.
-To register the application, you must supply it with your webpage URL, which is the Callback URL shown in the *Authentication* tab of the *Settings* screen.
+Each key and secret must belong to a unique application and cannot be shared or reused between different authentication backends. To register the application, you must supply it with your webpage URL, which is the Callback URL shown in the Authenticator details for your authenticator configuration.
+ifndef::controller-AG[]
+See xref:gw-display-auth-details[Displaying authenticator details] for instructions on accessing this information.
+endif::[]

.Procedure
+ifndef::controller-AG[]
+. From the navigation panel, select {MenuAMAuthentication}.
+. Click btn:[Create authentication].
+. Select *Azuread* from the *Authentication type* list and click btn:[Next].
+. Enter a *Name* for this authentication configuration.
+. Click btn:[Edit], then copy and paste Microsoft's *Application (Client) ID* into the *OIDC Key* field.
++
+Following instructions for link:https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app[registering your application with the Microsoft identity platform], supply the key (shown at one time only) to the client for authentication.
++
+. Copy and paste the secret key created for your {MSEntraID}/{Azure} AD application to the *OIDC Secret* field.
++
+include::snippets/snip-gw-authentication-additional-auth-fields.adoc[]
++
+include::snippets/snip-gw-authentication-common-checkboxes.adoc[]
++
+. Click btn:[Next].
+
+include::snippets/snip-gw-authentication-verification.adoc[]
+
+[role="_additional-resources"]
+.Next steps
+include::snippets/snip-gw-authentication-next-steps.adoc[]
+
+[role="_additional-resources"]
+.Additional resources
+For application registering basics in {MSEntraID}/{Azure} AD, see the link:https://learn.microsoft.com/en-us/entra/identity-platform/v2-overview[What is the Microsoft identity platform?] overview.
+endif::[]
+
+ifdef::controller-AG[]
+. From the navigation panel, select menu:Settings[].
 . Select *Azure AD settings* from the list of *Authentication* options.
 +
 [NOTE]
 ====
-The *Azure AD OAuth2 Callback URL* field is already pre-populated and non-editable.
-Once the application is registered, {Azure} displays the Application ID and Object ID.
+The *Azure AD OAuth2 Callback URL* field is already pre-populated and non-editable.
+Once the application is registered, Azure displays the Application ID and Object ID.
 ====
-. Click btn:[Edit], copy and paste {Azure}'s Application ID to the *Azure AD OAuth2 Key* field.
+. Click btn:[Edit], then copy and paste Azure's Application ID into the *Azure AD OAuth2 Key* field.
 +
-Following {Azure} AD's documentation for connecting your application to {Azure} Active Directory, supply the key (shown at one time only) to the client for authentication.
+Following Azure AD's documentation for connecting your app to {Azure} Active Directory, supply the key (shown at one time only) to the client for authentication.
 +
-. Copy and paste the secret key created for your {Azure} AD application to the *Azure AD OAuth2 Secret* field of the *Settings - Authentication* screen.
-. For more information on completing the {Azure} AD OAuth2 Organization Map and {Azure} AD OAuth2 Team Map fields, see xref:ref-controller-organization-mapping[Organization mapping] and xref:ref-controller-team-mapping[Team Mapping].
+. Copy and paste the secret key created for your Azure AD application to the *Azure AD OAuth2 Secret* field of the Settings - Authentication screen.
+. For more information on completing the Azure AD OAuth2 Organization Map and Azure AD OAuth2 Team Map fields, see xref:ref-controller-organization-mapping[Organization mapping] and xref:ref-controller-team-mapping[Team mapping].
 . Click btn:[Save].

.Verification
@@ -32,4 +72,5 @@ To verify that the authentication is configured correctly, log out of {Controlle
 image::ag-configure-auth-azure-logo.png[Azure AD logo]

.Additional resources
-For application registering basics in {Azure} AD, see the link:https://learn.microsoft.com/en-us/entra/identity-platform/v2-overview[What is the Microsoft identity platform?] overview.
+For application registering basics in Azure AD, see the link:https://learn.microsoft.com/en-us/entra/identity-platform/v2-overview[What is the Microsoft identity platform?] overview.
+endif::[] diff --git a/downstream/modules/platform/proc-controller-set-up-generic-oidc.adoc b/downstream/modules/platform/proc-controller-set-up-generic-oidc.adoc index 798c18b5d6..ef5ece1f48 100644 --- a/downstream/modules/platform/proc-controller-set-up-generic-oidc.adoc +++ b/downstream/modules/platform/proc-controller-set-up-generic-oidc.adoc @@ -1,34 +1,56 @@ +:_mod-docs-content-type: PROCEDURE + [id="controller-set-up-generic-oidc"] -= Generic OIDC authentication += Configuring generic OIDC authentication -OpenID Connect (OIDC) uses the OAuth 2.0 framework. -It enables third-party applications to verify the identity and obtain basic end-user information. -The main difference between OIDC and SAML is that SAML has a service provider (SP)-to-IdP trust relationship, whereas OIDC establishes the trust with the channel (HTTPS) that is used to obtain the security token. -To obtain the credentials needed to set up OIDC with {ControllerName}, see the documentation from the IdP of your choice that has OIDC support. +OpenID Connect (OIDC) uses the OAuth 2.0 framework. It enables third-party applications to verify the identity and obtain basic end-user information. The main difference between OIDC and SAML is that SAML has a service provider (SP)-to-IdP trust relationship, whereas OIDC establishes the trust with the channel (HTTPS) that is used to obtain the security token. To obtain the credentials needed to set up OIDC with {PlatformNameShort}, see the documentation from the IdP of your choice that has OIDC support. .Procedure -. From the navigation panel, select {MenuAEAdminSettings}. -. Select *Generic OIDC settings* from the list of *Authentication* options. -. Click btn:[Edit] and enter the following information: +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *Generic OIDC* from the *Authentication type* list and click btn:[Next]. +. Enter a *Name* for this authentication configuration. +. Enter the following information: ++ +* *OIDC Provider URL*: The URL for your OIDC provider. * *OIDC Key*: The client ID from your third-party IdP. * *OIDC Secret*: The client secret from your IdP. -* *OIDC Provider URL*: The URL for your OIDC provider. -* *Verify OIDC Provider Certificate*: Use the toggle to enable or disable the OIDC provider SSL certificate verification. -. Click btn:[Save]. + -[NOTE] -==== -Team and organization mappings for OIDC are currently not supported. -The OIDC adapter does authentication only and not authorization. -It is only capable of authenticating whether this user is who they say they are. -It does not authorize what this user is enabled to do. -Configuring generic OIDC creates the UserID appended with an ID or key to differentiate the same user ID originating from two different sources and therefore, considered different users. -So you get an ID of just the user name and the second is the username-. -==== +. Optional: Enter information for the following fields, using the tooltips provided for instructions and the required format: ++ +* *Access Token Method* - The default method is *POST*. +* *Access Token URL* +* *Authorization URL* +* *ID Key* +* *ID Token Issuer* +* *JWKS URI* +* *OIDC Public Key* +* *Revoke Token Method* - The default method is *GET*.
+* *Revoke Token URL* +* *Response Type* +* *Token Endpoint Auth Method* +* *Userinfo URL* +* *Username Key* ++ +. Use the *Verify OIDC Provider Certificate* option to enable or disable the OIDC provider SSL certificate verification. +. Use the *Redirect State* option to enable or disable the state parameter in the redirect URI. It is recommended that you keep this option enabled to prevent Cross-Site Request Forgery (CSRF) attacks. ++ +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] ++ +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +. Click btn:[Next]. -.Verification -To verify that the authentication is configured correctly, logout of {ControllerName} and the login screen displays the OIDC logo to indicate it as a alternative method of logging into {ControllerName}: +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] -image:ag-configure-auth-oidc-logo.png[OIDClogo] +// [ddacosta - removed as no longer true in 2.5.] +// [NOTE] +// ==== +// Team and organization mappings for OIDC are currently not supported. The OIDC adapter does authentication only and not authorization. It is only capable of authenticating whether this user is who they say they are. It does not authorize what this user is enabled to do. Configuring generic OIDC creates the UserID appended with an ID or key to differentiate the same user ID originating from two different sources and therefore, considered different users. So you get an ID of just the user name and the second is the username-. +// ==== diff --git a/downstream/modules/platform/proc-controller-set-up-github-webhook.adoc b/downstream/modules/platform/proc-controller-set-up-github-webhook.adoc index c98a1466b5..32ebbc1ec8 100644 --- a/downstream/modules/platform/proc-controller-set-up-github-webhook.adoc +++ b/downstream/modules/platform/proc-controller-set-up-github-webhook.adoc @@ -28,7 +28,7 @@ You cannot access this token again in GitHub. ==== + . Use the PAT to optionally create a GitHub credential: -.. Go to your instance, and xref:ref-controller-credential-gitHub-pat[Create a new credential for the GitHub PAT] using the generated token. +.. Go to your instance and create a new credential for the GitHub PAT, using the generated token. .. Make note of the name of this credential, as you use it in the job template that posts back to GitHub. + image::ug-webhooks-github-PAT-token.png[GitHub PAT token] @@ -41,7 +41,7 @@ image::ug-webhooks-webhook-credential.png[GitLab webhook credential] . Go to a GitHub repository where you want to configure webhooks and select menu:Settings[]. . From the navigation panel, select menu:Webhooks[Add webhook]. . To complete the *Add webhook* page, you must check the *Enable Webhook* option in a job template or workflow job template. -For more information, see step 3 in both xref:controller-create-job-template[Creating a job template] and xref:controller-create-workflow-template[Creating a workflow template]. +For more information, see step 3 in both xref:controller-create-job-template[Creating a job template] and xref:controller-create-workflow-template[Creating a workflow job template]. . Complete the following fields: * *Payload URL*: Copy the contents of the *Webhook URL* from the job template and paste it here. The results are sent to this address from GitHub. @@ -56,7 +56,7 @@ image::ug-webhooks-github-repo-choose-events.png[Github repo choose events] * *Active*: Leave this checked. . Click btn:[Add webhook]. .
When your webhook is configured, it is displayed in the list of webhooks active for your repository, along with the ability to edit or delete it. -Click on a webhook, to go to the *Manage webhook* screen. +Click a webhook to go to the *Manage webhook* screen. . Scroll to view the delivery attempts made to your webhook and whether they succeeded or failed. .Additional resources diff --git a/downstream/modules/platform/proc-controller-set-up-gitlab-webhook.adoc b/downstream/modules/platform/proc-controller-set-up-gitlab-webhook.adoc index 036754e83f..aec2388653 100644 --- a/downstream/modules/platform/proc-controller-set-up-gitlab-webhook.adoc +++ b/downstream/modules/platform/proc-controller-set-up-gitlab-webhook.adoc @@ -24,7 +24,7 @@ You cannot access this token again in GitLab. ==== + . Use the PAT to optionally create a GitLab credential: -.. Go to your instance, and xref:ref-controller-credential-gitLab-pat[create a new credential for the GitLab PAT] using the generated token. +.. Go to your instance, and create a new credential for the GitLab PAT, using the generated token. .. Make note of the name of this credential, as you use it in the job template that posts back to GitLab. + image::ug-webhooks-create-credential-gitlab-PAT-token.png[GitLab PAT token] @@ -37,7 +37,7 @@ image::ug-gitlab-webhook-credential.png[GitLab webhook credential] . Go to a GitLab repository where you want to configure webhooks. . From the navigation panel, select menu:Settings[Integrations]. . To complete the *Add webhook* page, you must check the *Enable Webhook* option in a job template or workflow job template. -For more information, see step 3 in both xref:controller-create-job-template[Creating a job template] and xref:controller-create-workflow-template[Creating a workflow template]. +For more information, see step 3 in both xref:controller-create-job-template[Creating a job template] and xref:controller-create-workflow-template[Creating a workflow job template]. . Complete the following fields: * *URL*: Copy the contents of the *Webhook URL* from the job template and paste it here. The results are sent to this address from GitLab. diff --git a/downstream/modules/platform/proc-controller-set-up-logging.adoc b/downstream/modules/platform/proc-controller-set-up-logging.adoc index 62a05363fe..16ade851b5 100644 --- a/downstream/modules/platform/proc-controller-set-up-logging.adoc +++ b/downstream/modules/platform/proc-controller-set-up-logging.adoc @@ -1,13 +1,22 @@ [id="proc-controller-set-up-logging"] - -= Setting Up Logging +ifdef::controller-AG[] += Setting up logging Use the following procedure to set up logging to any of the aggregator types. +endif::controller-AG[] +ifdef::hardening[] +To set up logging to any of the aggregator types for centralized logging, follow these steps: +endif::hardening[] .Procedure . From the navigation panel, select {MenuSetLogging}. . On the *Logging settings* page, click btn:[Edit]. -. Set the following configurable options: +ifdef::controller-AG[] ++ +image::logging-settings.png[Logging settings page] ++ +endif::controller-AG[] +. You can configure the following options: * *Logging Aggregator*: Enter the hostname or IP address that you want to send logs to. * *Logging Aggregator Port*: Specify the port for the aggregator if it requires one. @@ -19,10 +28,12 @@ However, TCP and UDP connections are determined by the hostname and port number Therefore, in the case of a TCP or UDP connection, supply the port in the specified field.
If a URL is entered in the *Logging Aggregator* field instead, its hostname portion is extracted as the hostname. ==== ++ * *Logging Aggregator Type*: Click to select the aggregator service from the list: +ifdef::controller-AG[] + image:configure-controller-system-logging-types.png[Logging types] - +endif::controller-AG[] * *Logging Aggregator Username*: Enter the username of the logging aggregator if required. * *Logging Aggregator Password/Token*: Enter the password of the logging aggregator if required. * *Loggers to Send Data to the Log Aggregator Form*: All four types of data are pre-populated by default. @@ -43,20 +54,26 @@ Equivalent to the `rsyslogd queue.maxdiskspace` setting on the action (e.g. `omh It stores files in the directory specified by `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH`. * *File system location for rsyslogd disk persistence*: Location to persist logs that should be retried after an outage of the external log aggregator (defaults to `/var/lib/awx`). Equivalent to the `rsyslogd queue.spoolDirectory` setting. +ifdef::controller-AG[] * *Log Format For API 4XX Errors*: Configure a specific error message. For more information, see xref:proc-controller-api-4xx-error-config[API 4XX Error Configuration]. - +endif::controller-AG[] +ifdef::hardening[] +* *Log Format For API 4XX Errors*: Configure a specific error message. For more information, see link:{URLControllerAdminGuide}assembly-controller-logging-aggregation#proc-controller-api-4xx-error-config[API 4XX Error Configuration] +endif::hardening[] Set the following options: * *Log System Tracking Facts Individually*: Click the tooltip image:question_circle.png[Help,15,15] icon for additional information, such as whether or not you want to turn it on, or leave it off by default. . Review your entries for your chosen logging aggregation. +ifdef::controller-AG[] The following example is set up for Splunk: + image:configure-controller-system-logging-splunk-example.png[Splunk logging example] +endif::controller-AG[] * *Enable External Logging*: Select this checkbox if you want to send logs to an external log aggregator. * *Enable/disable HTTPS certificate verification*: Certificate verification is enabled by default for the HTTPS log protocol. -Select this checkbox if yoiu want the log handler to verify the HTTPS certificate sent by the external log aggregator before establishing a connection. +Select this checkbox if you want the log handler to verify the HTTPS certificate sent by the external log aggregator before establishing a connection. * *Enable rsyslogd debugging*: Select this checkbox to enable high verbosity debugging for `rsyslogd`. Useful for debugging connection issues for external log aggregation. diff --git a/downstream/modules/platform/proc-controller-set-up-prometheus.adoc b/downstream/modules/platform/proc-controller-set-up-prometheus.adoc index 086ba85fde..2de6f471a4 100644 --- a/downstream/modules/platform/proc-controller-set-up-prometheus.adoc +++ b/downstream/modules/platform/proc-controller-set-up-prometheus.adoc @@ -15,7 +15,7 @@ Alternatively, you can provide an OAuth2 token (which can be generated at `/api/ By default, the configuration assumes a user with username=`admin` and password=`password`. ==== + -Using an OAuth2 Token, created at the `/api/v2/tokens` endpoint to authenticate Prometheus with {ControllerName}, the following example provides a valid scrape configuration if the URL for your {ControllerName}'s metrics endpoint is `https://controller_host:443/metrics`. 
+Using an OAuth2 token created at the `/api/v2/tokens` endpoint to authenticate Prometheus with {ControllerName}, the following example provides a valid scrape configuration if the URL for your {ControllerName}'s metrics endpoint is `https://controller_host:443/metrics`. + [literal, options="nowrap" subs="+attributes"] ---- @@ -40,7 +40,7 @@ For help configuring other aspects of Prometheus, such as alerts and service dis + If Prometheus is already running, you must restart it to apply the configuration changes by making a *POST* to the reload endpoint, or by killing the Prometheus process or service. -. Use a browser to navigate to your graph in the Prometheus UI at `http://:9090/graph` and test out some queries. +. Use a browser to navigate to your graph in the Prometheus UI at `http://:9090/graph` and test out some queries. For example, you can query the current number of active {ControllerName} user sessions by executing: `awx_sessions_total{type="user"}`. + image:metrics-prometheus-ui-query-example.png[Prometheus queries] diff --git a/downstream/modules/platform/proc-controller-set-up-radius.adoc b/downstream/modules/platform/proc-controller-set-up-radius.adoc index 2ebd8d2c57..ed64bfd691 100644 --- a/downstream/modules/platform/proc-controller-set-up-radius.adoc +++ b/downstream/modules/platform/proc-controller-set-up-radius.adoc @@ -1,14 +1,24 @@ [id="controller-set-up-radius"] -= RADIUS authentication += Configuring RADIUS authentication -You can configure {ControllerName} to centrally use RADIUS as a source for authentication information. +You can configure {PlatformNameShort} to centrally use RADIUS as a source for authentication information. .Procedure -. From the navigation panel, select {MenuAEAdminSettings}. -. Select *RADIUS settings* from the list of *Authentication* options. -. Click btn:[Edit] and enter the host or IP of the RADIUS server in the *RADIUS Server* field. -If you leave this field blank, RADIUS authentication is disabled. -. Enter the port and secret information in the next two fields. -. Click btn:[Save]. +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *Radius* from the *Authentication type* list and click btn:[Next]. +. Enter the host or IP of the RADIUS server in the *RADIUS Server* field. If you leave this field blank, RADIUS authentication is disabled. +. Enter the *Shared secret for authenticating to RADIUS server*. ++ +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] ++ +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +. Click btn:[Next]. + +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] diff --git a/downstream/modules/platform/proc-controller-set-up-tacacs+.adoc b/downstream/modules/platform/proc-controller-set-up-tacacs+.adoc index e739aa513e..7b9fdb56b1 100644 --- a/downstream/modules/platform/proc-controller-set-up-tacacs+.adoc +++ b/downstream/modules/platform/proc-controller-set-up-tacacs+.adoc @@ -1,9 +1,8 @@ [id="controller-set-up-tacacs"] -= TACACS Plus authentication += Configuring TACACS+ authentication -Terminal Access Controller Access-Control System Plus (TACACS+) is a protocol that handles remote authentication and related services for networked access control through a centralized server.
-TACACS+ provides authentication, authorization and accounting (AAA) services, in which you can configure {ControllerName} to use as a source for authentication. +Terminal Access Controller Access-Control System Plus (TACACS+) is a protocol that handles remote authentication and related services for networked access control through a centralized server. TACACS+ provides authentication, authorization, and accounting (AAA) services, and you can configure {PlatformNameShort} to use it as a source for authentication. [NOTE] ==== @@ -11,15 +10,24 @@ This feature is deprecated and will be removed in a future release. ==== .Procedure -. From the navigation panel, select {MenuAEAdminSettings}. -. Select *TACACs+ settings* from the list of *Authentication* options. -. Click btn:[Edit] and enter the following information: -* *TACACS+ Server*: Provide the hostname or IP address of the TACACS+ server with which to authenticate. -If you leave this field blank, TACACS+ authentication is disabled. -* *TACACS+ Port*: TACACS+ uses port 49 by default, which is already pre-populated. -* *TACACS+ Secret*: The secret key for TACACS+ authentication server. -* *TACACS+ Auth Session Timeout*: The session timeout value in seconds. -The default is 5 seconds. -* *TACACS+ Authentication Protocol*: The protocol used by the TACACS+ client. -The options are *ascii* or *pap*. -. Click btn:[Save]. + +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *TACACS+* from the *Authentication type* list and click btn:[Next]. +. Enter a *Name* for this TACACS+ configuration. +. Enter the following information: ++ +* *Hostname of TACACS+ Server*: Provide the hostname or IP address of the TACACS+ server with which to authenticate. If you leave this field blank, TACACS+ authentication is disabled. +* *TACACS+ Authentication Protocol*: The protocol used by the TACACS+ client. The options are *ascii* or *pap*. +* *Shared secret for authenticating to TACACS+ server*: The secret key for the TACACS+ authentication server. +. The *TACACS+ client address sending enabled* option is disabled by default. To enable client address sending, select the checkbox. ++ +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] ++ +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +. Click btn:[Next]. + +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] \ No newline at end of file diff --git a/downstream/modules/platform/proc-controller-sourced-from-project.adoc b/downstream/modules/platform/proc-controller-sourced-from-project.adoc index 20c0f7b1c9..b5c9e132a4 100644 --- a/downstream/modules/platform/proc-controller-sourced-from-project.adoc +++ b/downstream/modules/platform/proc-controller-sourced-from-project.adoc @@ -10,32 +10,33 @@ Use the following procedure to configure a project-sourced inventory: .Procedure . From the navigation panel, select {MenuInfrastructureInventories}. . Select the inventory name you want a source to and click the *Sources* tab. -. Click btn:[Add source]. -. In the *Add new source* page, select *Sourced from a Project* from the *Source* list. +. Click btn:[Create source]. +. In the *Create source* page, select *Sourced from a Project* from the *Source* list. .
Enter the following details in the additional fields: -* Optional: *Source Control Branch/Tag/Commit*: Enter the SCM branch, tags, commit hashes, arbitrary refs, or revision number (if applicable) from the source control (Git or Subversion) to checkout. +* Optional: *Source control branch/tag/commit*: Enter the SCM branch, tags, commit hashes, arbitrary refs, or revision number (if applicable) from the source control (Git or Subversion) to checkout. + -This field only displays if the sourced project has the *Allow Branch Override* option checked. For further information, see xref:proc-scm-git-subversion[SCM Types - Git and Subversion]. +This field only displays if the sourced project has the *Allow branch override* option checked. +For further information, see xref:proc-scm-git-subversion[SCM Types - Configuring playbooks to use Git and Subversion]. + image:projects-create-scm-project-branch-override-checked.png[Allow branch override] + -Some commit hashes and refs might not be available unless you also provide a custom refspec in the next field. +Some commit hashes and refs might not be available unless you also give a custom refspec in the next field. If left blank, the default is HEAD which is the last checked out Branch/Tag/Commit for this project. * Optional: *Credential*: Specify the credential to use for this source. * *Project* (required): Pre-populates with a default project, otherwise, specify the project this inventory is using as its source. Click the image:search.png[Search,15,15] icon to choose from a list of projects. If the list is extensive, use the search to narrow the options. -* *Inventory File* (required): Select an inventory file associated with the sourced project. +* *Inventory file* (required): Select an inventory file associated with the sourced project. If not already populated, you can type it into the text field within the menu to filter extraneous file types. In addition to a flat file inventory, you can point to a directory or an inventory script. + image:inventories-create-source-sourced-from-project-filter.png[image] . Optional: You can specify the verbosity, host filter, enabled variable/value, and update options as described in xref:proc-controller-add-source[Adding a source]. -. Optional: To pass to the custom inventory script, you can set environment variables in the *Source Variables* field. +. Optional: To pass environment variables to the custom inventory script, set them in the *Source variables* field, as shown in the example following this procedure. You can also place inventory scripts in source control and then run it from a project. -For more information, see link:{BaseURL}red_hat_ansible_automation_platform/2.4/html-single/automation_controller_administration_guide/index#assembly-inventory-file-importing[Inventory File Importing] in _{ControllerAG}_. +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-inventory-file-importing[Inventory File Importing] in _{ControllerAG}_.
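For example, a minimal sketch of what you might enter in the *Source variables* field, in YAML format (these variable names are illustrative only and are not required by any particular inventory script):

----
---
HTTPS_PROXY: proxy.example.com:3128
INVENTORY_DEBUG: "true"
----

The values are exposed to the inventory script as environment variables when the source sync runs, so the script can read them with its language's standard environment lookup.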
//+ //image:inventories-create-source-sourced-from-project-example.png[Inventories - create source - sourced from project example] diff --git a/downstream/modules/platform/proc-controller-use-an-exec-env.adoc b/downstream/modules/platform/proc-controller-use-an-exec-env.adoc index 16ecd31ed1..71980c122f 100644 --- a/downstream/modules/platform/proc-controller-use-an-exec-env.adoc +++ b/downstream/modules/platform/proc-controller-use-an-exec-env.adoc @@ -3,8 +3,8 @@ = Adding an {ExecEnvShort} to a job template .Prerequisites - -* An {ExecEnvShort} must have been created using ansible-builder as described in xref:ref-controller-building-exec-env[Build an {ExecEnvShort}]. +//[ddacosta converting xref to a link because this content is shared in multiple docs] +* An {ExecEnvShort} must have been created using ansible-builder as described in link:{URLControllerUserGuide}/assembly-controller-execution-environments#ref-controller-build-exec-envs[Build an {ExecEnvShort}]. When an {ExecEnvShort} has been created, you can use it to run jobs. Use the {ControllerName} UI to specify the {ExecEnvShort} to use in your job templates. * Depending on whether an {ExecEnvShort} is made available for global use or tied to an organization, you must have the appropriate level of administrator privileges to use an {ExecEnvShort} in a job. @@ -27,7 +27,7 @@ The image name requires its full location (repository), the registry, image name + [NOTE] ==== -If you do not set a typing error for pull, the value defaults to *Only pull the image if not present before running*. +If you do not set a type for pull, the value defaults to *Only pull the image if not present before running*. ==== + * Optional: *Description*: diff --git a/downstream/modules/platform/proc-controller-user-permissions.adoc b/downstream/modules/platform/proc-controller-user-permissions.adoc index c07233f547..743388013d 100644 --- a/downstream/modules/platform/proc-controller-user-permissions.adoc +++ b/downstream/modules/platform/proc-controller-user-permissions.adoc @@ -1,38 +1,27 @@ -[id="proc-controller-user-permissions"] +:_mod-docs-content-type: PROCEDURE -= Adding and removing user permissions +[id="proc-controller-user-permissions"] -To add permissions to a particular user: += Adding roles to a team -.Procedure -. From the *Users* list view, click on the name of a user. -. On the *Details* page, click btn:[Add]. -This opens the *Add user permissions* wizard. -+ -image:users-add-permissions-form.png[Add Permissions Form] -. Select the object to a assign permissions, for which the user will have access. -. Click btn:[Next]. -. Select the resource to assign team roles and click btn:[Next]. -+ -image:users-permissions-IG-select.png[image] - -. Select the resource you want to assign permissions to. -Different resources have different options available. -+ -image:users-permissions-IG-roles.png[image] - -. Click btn:[Save]. -. The *Roles* page displays the updated profile for the user with the permissions assigned for each selected resource. +You can assign permissions to teams, such as the ability to edit and administer resources and other elements. +You can set permissions through an inventory, project, job template and other resources, or within the Organizations view. [NOTE] ==== -You can also add teams, individual, or multiple users and assign them permissions at the object level. -This includes templates, credentials, inventories, projects, organizations, or instance groups. -This feature reduces the time for an organization to onboard many users at one time.
+Teams cannot be assigned to an organization by adding roles. Refer to the steps provided in link:{URLCentralAuth}/gw-managing-access#proc-gw-add-team-organization[Adding a team to an organization] for detailed instructions. ==== -.To remove permissions: -* Click the image:disassociate.png[Disassociate,10,10] icon next to the resource. -This launches a confirmation dialog asking you to confirm the disassociation. - - +.Procedure +. From the navigation panel, select {MenuAMTeams}. +. Select the team *Name* to which you want to add roles. +. Select the *Roles* tab and click btn:[Add roles]. ++ +include::snippets/snip-gw-roles-note-multiple-components.adoc[] ++ +. Select a *Resource type* and click btn:[Next]. +. Select the resources to receive the new roles and click btn:[Next]. +. Select the roles to apply to the resources and click btn:[Next]. +. Review the settings and click btn:[Finish]. ++ +The *Add roles* dialog displays, indicating whether the role assignments were successfully applied. Click btn:[Close] to close the dialog. diff --git a/downstream/modules/platform/proc-controller-verify-container-group.adoc b/downstream/modules/platform/proc-controller-verify-container-group.adoc index 08333ac469..8f31ea66eb 100644 --- a/downstream/modules/platform/proc-controller-verify-container-group.adoc +++ b/downstream/modules/platform/proc-controller-verify-container-group.adoc @@ -6,8 +6,8 @@ To verify the deployment and termination of your container: .Procedure -. Create a mock inventory and associate the container group to it by populating the name of the container group in the *Instance Group* field. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#proc-controller-adding-new-inventory[Add a new inventory] in the _{ControllerUG}_. +. Create a mock inventory and associate the container group to it by populating the name of the container group in the *Instance groups* field. +For more information, see xref:proc-controller-adding-new-inventory[Add a new inventory]. + image::ag-inventories-create-new-test-inventory.png[Create test inventory] + diff --git a/downstream/modules/platform/proc-controller-view-host.adoc b/downstream/modules/platform/proc-controller-view-host.adoc new file mode 100644 index 0000000000..c54e567b65 --- /dev/null +++ b/downstream/modules/platform/proc-controller-view-host.adoc @@ -0,0 +1,29 @@ +[id="proc-controller-view-host"] + += Viewing the host details + +Use the following procedure to view the host details for a job run. + +.Procedure + +. From the navigation panel, select {MenuInfrastructureHosts}. +The *Hosts* page displays the following information about the host or hosts affected by recent job runs. + +. Selecting a particular host displays the *Details* page for that host, with the following information: + +* The *Name* of the Host. +* The *Inventory* associated with that host. Selecting this inventory displays details of the inventory. +* When the Host was *Created* and by whom. Selecting the creator displays details of the creator. +* When the Host was *Last modified*. Selecting the user displays details of that user. +* *Variables* associated with the Host. You can display the variables in YAML or JSON format. + +. Click btn:[Edit host] to edit details of the host. + +* Select the *Facts* tab to display facts associated with the host. +* Select the *Groups* tab to display the Groups associated with the host. +** Click btn:[Associate groups] to associate a group with the host.
+* Select the *Jobs* tab to display the Jobs which ran on the host. +** Click the image:arrow.png[Expand,15,15] icon to display details of the job. ++ +image::hosts_jobs_details.png[Details of job associated with a host] + diff --git a/downstream/modules/platform/proc-controller-view-jobs-associated-with-instance-group.adoc b/downstream/modules/platform/proc-controller-view-jobs-associated-with-instance-group.adoc index 50afb6677c..906897a717 100644 --- a/downstream/modules/platform/proc-controller-view-jobs-associated-with-instance-group.adoc +++ b/downstream/modules/platform/proc-controller-view-jobs-associated-with-instance-group.adoc @@ -17,4 +17,4 @@ Each job displays the following details: .Additional resources The instances are run in accordance with instance group policies. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-instance-and-container-groups#controller-instance-group-policies[Instance Group Policies] in the _{ControllerAG}_. +For more information, see xref:controller-instance-group-policies[Instance group policies]. diff --git a/downstream/modules/platform/proc-create-a-user.adoc b/downstream/modules/platform/proc-create-a-user.adoc index 09d983dc54..570f26b1a7 100644 --- a/downstream/modules/platform/proc-create-a-user.adoc +++ b/downstream/modules/platform/proc-create-a-user.adoc @@ -6,6 +6,7 @@ This procedure creates a Keycloak user, with the `hubadmin` role, that can log i .Procedure +. Log in to {OCP}. . Navigate to menu:Operator[Installed Operators]. . Select the {OperatorRHSSO} project. . Select the *Keycloak Realm* tab and click btn:[Create Keycloak User]. diff --git a/downstream/modules/platform/proc-create-keycloak-client.adoc b/downstream/modules/platform/proc-create-keycloak-client.adoc index ad9f0f5eb6..45392299a7 100644 --- a/downstream/modules/platform/proc-create-keycloak-client.adoc +++ b/downstream/modules/platform/proc-create-keycloak-client.adoc @@ -8,6 +8,7 @@ When Single Sign-On validates or issues the `OAuth` token, the client provides t .Procedure +. Log in to {OCP}. . Navigate to menu:Operator[Installed Operators]. . Select the {OperatorRHSSO} project. . Select the *Keycloak Client* tab and click btn:[Create Keycloak Client]. @@ -141,6 +142,6 @@ spec: . Click btn:[Create] and wait for the process to complete. -When {HubName} is deployed, you must update the client with the “Valid Redirect URIs” and “Web Origins” as described in xref:proc-update-rhsso-client_{context}[Updating the {RHSSO} client] +After you deploy {HubName}, you must update the client with the “Valid Redirect URIs” and “Web Origins” as described in xref:proc-update-rhsso-client_{context}[Updating the {RHSSO} client] Additionally, the client comes pre-configured with token mappers, however, if your authentication provider does not provide group data to Red Hat SSO, then the group mapping must be updated to reflect how that information is passed. This is commonly by user attribute. diff --git a/downstream/modules/platform/proc-create-keycloak-instance.adoc b/downstream/modules/platform/proc-create-keycloak-instance.adoc index 5f2be0cacf..0175d1d634 100644 --- a/downstream/modules/platform/proc-create-keycloak-instance.adoc +++ b/downstream/modules/platform/proc-create-keycloak-instance.adoc @@ -2,14 +2,15 @@ = Creating a Keycloak instance -When the {OperatorRHSSO} is installed you can create a Keycloak instance for use with {PlatformNameShort}. 
+After you install the {OperatorRHSSO}, you can create a Keycloak instance for use with {PlatformNameShort}. From here you provide an external Postgres or one will be created for you. .Procedure +. Log in to {OCP}. . Navigate to menu:Operator[Installed Operators]. -. Select the `rh-sso` project. +. Select the *RH-SSO* project. . Select the *{OperatorRHSSO}*. . On the {OperatorRHSSO} details page select btn:[Keycloak]. . Click btn:[Create instance]. @@ -33,7 +34,5 @@ spec: ---- + . Click btn:[Create]. - . When deployment is complete, you can use this credential to login to the administrative console. - . You can find the credentials for the administrator in the `credential-` (example keycloak) secret in the namespace. diff --git a/downstream/modules/platform/proc-create-keycloak-realm.adoc b/downstream/modules/platform/proc-create-keycloak-realm.adoc index 062cb16717..df6dce9a87 100644 --- a/downstream/modules/platform/proc-create-keycloak-realm.adoc +++ b/downstream/modules/platform/proc-create-keycloak-realm.adoc @@ -8,6 +8,7 @@ Realms are isolated from one another and can only manage and authenticate the us .Procedure +. Log in to {OCP}. . Navigate to menu:Operator[Installed Operators]. . Select the *{OperatorRHSSO}* project. . Select the *Keycloak Realm* tab and click btn:[Create Keycloak Realm]. diff --git a/downstream/modules/platform/proc-create-postresql-secret.adoc b/downstream/modules/platform/proc-create-postresql-secret.adoc index 5fa04cfbf7..a77ab2764a 100644 --- a/downstream/modules/platform/proc-create-postresql-secret.adoc +++ b/downstream/modules/platform/proc-create-postresql-secret.adoc @@ -8,7 +8,7 @@ For migration to be successful, you must provide access to the database for your .Procedure -. Create a yaml file for your postgresql configuration secret: +. Create a YAML file for your postgresql configuration secret: + ----- apiVersion: v1 diff --git a/downstream/modules/platform/proc-create-secret-key-secret.adoc b/downstream/modules/platform/proc-create-secret-key-secret.adoc index c52ff4b2ca..5548a8c7ed 100644 --- a/downstream/modules/platform/proc-create-secret-key-secret.adoc +++ b/downstream/modules/platform/proc-create-secret-key-secret.adoc @@ -4,25 +4,65 @@ [role=_abstract] -To migrate your data to {OperatorPlatform} on {OCPShort}, you must create a secret key that matches the secret key defined in the inventory file during your initial installation. Otherwise, the migrated data will remain encrypted and unusable after migration. +To migrate your data to {OperatorPlatformNameShort} on {OCPShort}, you must create a secret key. +If you are migrating {ControllerName}, {HubName}, and {EDAName}, you must have a secret key for each that matches the secret key defined in the inventory file during your initial installation. +Otherwise, the migrated data remains encrypted and unusable after migration. + +[NOTE] +==== +When specifying the symmetric encryption secret key on the custom resources, note that for {ControllerName} the field is called `secret_key_name`, but for {HubName} and {EDAName}, the field is called `db_fields_encryption_secret`. + +==== + +[NOTE] +==== +In the Kubernetes secrets, {ControllerName} and {EDAName} use the same stringData key (`secret_key`), but {HubName} uses a different key (`database_fields.symmetric.key`). +==== .Procedure -. Locate the old secret key in the inventory file you used to deploy {PlatformNameShort} in your previous installation. -. Create a yaml file for your secret key: +.
Locate the old secret keys in the inventory file you used to deploy {PlatformNameShort} in your previous installation. +. Create a YAML file for your secret keys: + ----- +--- +apiVersion: v1 +kind: Secret +metadata: + name: -secret-key + namespace: +stringData: + secret_key: +type: Opaque +--- apiVersion: v1 kind: Secret metadata: - name: -secret-key + name: -secret-key namespace: stringData: - secret_key: + secret_key: type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: -secret-key + namespace: +stringData: + database_fields.symmetric.key: +type: Opaque + ----- -. Apply the secret key yaml to the cluster: ++ +[NOTE] +==== +If `admin_password_secret` is not provided, the operator looks for a secret named `-admin-password` for the admin password. +If it is not present, the operator generates a password and creates a secret from it named `-admin-password`. +==== ++ +. Apply the secret key YAML to the cluster: + ----- -oc apply -f +oc apply -f ----- diff --git a/downstream/modules/platform/proc-creating-a-new-web-server-to-host-repositories.adoc b/downstream/modules/platform/proc-creating-a-new-web-server-to-host-repositories.adoc index fd2c10c520..220635f333 100644 --- a/downstream/modules/platform/proc-creating-a-new-web-server-to-host-repositories.adoc +++ b/downstream/modules/platform/proc-creating-a-new-web-server-to-host-repositories.adoc @@ -57,7 +57,7 @@ $ sudo firewall-cmd --zone=public --add-service=http –add-service=https --perm $ sudo firewall-cmd --reload ---- -. On {ControllerName} and {HubName}, add a repo file at __/etc/yum.repos.d/local.repo__, and add the optional repos if needed: +. On automation services, add a repo file at __/etc/yum.repos.d/local.repo__, and add the optional repos if needed: + ---- [Local-BaseOS] diff --git a/downstream/modules/platform/proc-creating-a-secret.adoc b/downstream/modules/platform/proc-creating-a-secret.adoc index f5a60b5173..efefaa9855 100644 --- a/downstream/modules/platform/proc-creating-a-secret.adoc +++ b/downstream/modules/platform/proc-creating-a-secret.adoc @@ -33,6 +33,6 @@ stringData: + <1> This name is used in the next step when creating the {HubName} instance. <2> If the secret was changed when creating the Keycloak client for {HubName} be sure to change this value to match. -<3> Enter the value of the `public_key` copied in xref:proc-installing-the-ansible-platform-operator_{context}[Installing the {PlatformNameShort} Operator]. +<3> Enter the value of the `public_key` for your {OperatorPlatformNameShort} deployment. . Click btn:[Create] and wait for the process to complete. diff --git a/downstream/modules/platform/proc-creating-ansible-role.adoc b/downstream/modules/platform/proc-creating-ansible-role.adoc new file mode 100644 index 0000000000..53841a3755 --- /dev/null +++ b/downstream/modules/platform/proc-creating-ansible-role.adoc @@ -0,0 +1,57 @@ +[id="creating-ansible-role_{context}"] + += Creating a role + +You can create roles using the {Galaxy} CLI tool, which is included with your {PlatformNameShort} bundle. Access role-specific commands from the `role` subcommand: + +[source,bash] +---- +ansible-galaxy role init +---- + +Standalone roles outside of Collections are supported. +Create new roles inside a Collection to take advantage of the features {PlatformNameShort} has to offer. + +.Procedure + +. In a terminal, navigate to the `roles` directory inside a collection. +.
Create a role called `my_role` inside the collection: ++ +---- +$ ansible-galaxy role init my_role +---- ++ +The collection now includes a role named `my_role` inside the `roles` directory, as you can see in this example: ++ +---- +~/.ansible/collections/ansible_collections// + ... + └── roles/ + └── my_role/ + ├── .travis.yml + ├── README.md + ├── defaults/ + │ └── main.yml + ├── files/ + ├── handlers/ + │ └── main.yml + ├── meta/ + │ └── main.yml + ├── tasks/ + │ └── main.yml + ├── templates/ + ├── tests/ + │ ├── inventory + │ └── test.yml + └── vars/ + └── main.yml +---- +. A custom role skeleton directory can be supplied by using the `--role-skeleton` argument. +This allows organizations to create standardized templates for new roles to suit their needs. ++ +---- +$ ansible-galaxy role init my_role --role-skeleton ~/role_skeleton +---- ++ +This creates a role named `my_role` by copying the contents of `~/role_skeleton` into `my_role`. +The contents of `role_skeleton` can be any files or folders that are valid inside a role directory. diff --git a/downstream/modules/platform/proc-creating-collection-namespace.adoc b/downstream/modules/platform/proc-creating-collection-namespace.adoc index 78a0a369e8..4084ac7c12 100644 --- a/downstream/modules/platform/proc-creating-collection-namespace.adoc +++ b/downstream/modules/platform/proc-creating-collection-namespace.adoc @@ -34,7 +34,7 @@ Once the namespace has been created, you can import the collection by using the . Click btn:[Upload]. -This opens the 'My Imports' page. You can see the status of the import and various details of the files and modules that have been imported. +This opens the 'My Imports' page. You can see the status of the import and various details of the files and modules that have been imported. == Importing the collection tarball by using the CLI diff --git a/downstream/modules/platform/proc-creating-the-custom-execution-environment-definition.adoc b/downstream/modules/platform/proc-creating-the-custom-execution-environment-definition.adoc index 291739651d..ace86cbad1 100644 --- a/downstream/modules/platform/proc-creating-the-custom-execution-environment-definition.adoc +++ b/downstream/modules/platform/proc-creating-the-custom-execution-environment-definition.adoc @@ -97,7 +97,7 @@ trusted-host = ---- + -. Optional: If a `bindep.txt` file is being used to add RPMs the custom {ExecEnvShort}, create a `custom.repo` file under the `files/` subdirectory that points to your disconnected Satellite or other location hosting the RPM repositories. If this step is necessary, uncomment the steps in the example `execution-environment.yml` file that correspond with the `custom.repo` file. +. Optional: If you use a `bindep.txt` file to add RPMs to the custom {ExecEnvShort}, create a `custom.repo` file under the `files/` subdirectory that points to your disconnected Satellite or other location hosting the RPM repositories. If this step is necessary, uncomment the steps in the example `execution-environment.yml` file that correspond with the `custom.repo` file. + The following example is for the UBI repos. Other local repos can be added to this file as well. The URL path may need to change depending on where the mirror content is located on the web server.
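A minimal sketch of such a `custom.repo` file, assuming a hypothetical internal mirror at `http://mirror.example.com` (the repository ID and `baseurl` path are illustrative and must match your Satellite or web server layout):

----
[ubi-8-baseos]
name = Red Hat Universal Base Image 8 (RPMs) - BaseOS
baseurl = http://mirror.example.com/repos/ubi-8-baseos-rpms/
enabled = 1
# Enable GPG checking and set gpgkey once the mirrored signing key is in place.
gpgcheck = 0
----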
diff --git a/downstream/modules/platform/proc-custom-logos-images.adoc b/downstream/modules/platform/proc-custom-logos-images.adoc new file mode 100644 index 0000000000..fbdf33212f --- /dev/null +++ b/downstream/modules/platform/proc-custom-logos-images.adoc @@ -0,0 +1,26 @@ +[id="proc-custom-logos-images"] + +//[ddacosta]Obsolete, this information is provided in the proc-settings-platform-gateway.adoc module now. + += Setting a custom logo + +{ControllerNameStart} and {PlatformName} support the use of a custom logo. + +You can add a custom logo by uploading an image and supplying a custom login message from the *{GatewayStart} settings* page. + +.Procedure + +. From the navigation panel, select {MenuSetGateway}. +. In the *Custom login info* field, provide specific information (such as a legal notice or a disclaimer). +. In the *Custom logo* field, provide an image file for setting up a custom logo (must be a data URL with a base64-encoded GIF, PNG, or JPEG image). + +.Example + +You upload a specific logo and add the following text: + +image::ag-configure-tower-ui-logo-filled.png[Adding a custom logo] + +The {PlatformNameShort} login dialog resembles the following: + +image::ag-configure-aap-ui-angry-spud-login.png[Login page with custom logo] + diff --git a/downstream/modules/platform/proc-define-mesh-node-types.adoc b/downstream/modules/platform/proc-define-mesh-node-types.adoc index 2923f1692f..6f525e1510 100644 --- a/downstream/modules/platform/proc-define-mesh-node-types.adoc +++ b/downstream/modules/platform/proc-define-mesh-node-types.adoc @@ -1,8 +1,8 @@ [id="proc-define-mesh-node-types"] -ifdef::controller-AG[] +ifdef::controller-UG[] = Managing instances -endif::controller-AG[] +endif::controller-UG[] ifdef::operator-mesh[] = Defining {AutomationMesh} node types endif::operator-mesh[] @@ -19,6 +19,13 @@ These hop nodes are not part of the Kubernetes cluster and are registered in {Co The following procedure demonstrates how to set the node type for the hosts. +ifdef::operator-mesh[] +[NOTE] +==== +By default, {SaaSonAWS} includes two hop nodes that you can peer execution nodes to. +==== +endif::operator-mesh[] + .Procedure //[ddacosta]Removed specified panel to simplify changes in the future. . From the navigation panel, select {MenuInfrastructureInstances}. @@ -50,29 +57,28 @@ Options: ** *Enable instance*: Check this box to make it available for jobs to run on an execution node. ** Check the *Managed by policy* box to enable policy to dictate how the instance is assigned. -** Check the *Peers from control nodes* box to enable control nodes to peer to this instance automatically. -For nodes connected to {ControllerName}, check the *Peers from Control* nodes box to create a direct communication link between that node and {ControllerName}. -For all other nodes: - -*** If you are not adding a hop node, make sure *Peers from Control* is checked. -*** If you are adding a hop node, make sure *Peers from Control* is not checked. -*** For execution nodes that communicate with hop nodes, do not check this box. -** To peer an execution node with a hop node, click the image:search.png[Search,15,15] icon next to the *Peers* field. -+ -The Select Peers window is displayed. -+ -Peer the execution node to the hop node. - -. Click btn:[Save]. 
-+ -image::instances_create_details.png[Create Instance details] - +** *Peers from control nodes*: +*** If you are configuring a hop node: +**** If the hop node needs to have requests pushed directly from {ControllerName}, then check the *Peers from Control* box. +// This creates a direct communication link between the hop node and {ControllerName}. +**** If the hop node is peered to another hop node, then make sure *Peers from Control* is not checked. +*** If you are configuring an execution node: +**** If the execution node needs to have requests pushed directly from {ControllerName}, then check the *Peers from Control* box. +// This creates a direct communication link between the execution node and {ControllerName}. +**** If the execution node is peered to a hop node, then make sure that *Peers from Control* is not checked. +. Click btn:[Associate peers]. +//+ +//image::instances_create_details.png[Create Instance details] ifdef::operator-mesh[] -. To view a graphical representation of your updated topology, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/assembly-controller-topology-viewer[Topology viewer]. +. To verify peering configuration and the direction of traffic, you can use the topology view +to view a graphical representation of your updated topology. +This can help to determine where your firewall rules might need to be updated. +For more information, see link:{URLControllerUserGuide}/assembly-controller-topology-viewer[Topology view]. endif::operator-mesh[] -ifdef::controller-AG[] -. To view a graphical representation of your updated topology, see xref:assembly-controller-topology-viewer[Topology viewer]. -endif::controller-AG[] +ifdef::controller-UG[] +. To view a graphical representation of your updated topology, see +xref:assembly-controller-topology-viewer[Topology view]. +endif::controller-UG[] + [NOTE] ==== @@ -97,12 +103,6 @@ requirements.yml . Extract the downloaded `tar.gz` Install Bundle from the location where you downloaded it. To ensure that these files are in the correct location on the remote machine, the install bundle includes the `install_receptor.yml` playbook. -The playbook requires the Receptor collection. -Run the following command to download the collection: -+ ----- -ansible-galaxy collection install -r requirements.yml ----- . Before running the `ansible-playbook` command, edit the following fields in the `inventory.yml` file: + @@ -110,9 +110,9 @@ ansible-galaxy collection install -r requirements.yml all: hosts: remote-execution: - ansible_host: 10.0.0.6 - ansible_user: # user provided - ansible_ssh_private_key_file: ~/.ssh/ + ansible_host: localhost # change to the mesh node host name + ansible_user: # user provided + ansible_ssh_private_key_file: ~/.ssh/ ---- * Ensure `ansible_host` is set to the IP address or DNS of the node. @@ -150,26 +150,64 @@ Additionally, it retrieves any other collection dependencies that might be neede * Install the receptor collection on all nodes where your playbook will run, otherwise an error occurs. . If `receptor_listener_port` is defined, the machine also requires an available open port on which to establish inbound TCP connections, for example, 27199. 
-Run the following command to open port 27199 for receptor communication: +Run the following command to open port 27199 in your firewall for receptor communication: + ---- sudo firewall-cmd --permanent --zone=public --add-port=27199/tcp ---- ++ +[NOTE] +==== +It might be the case that some servers do not listen on the receptor port (the default is 27199). + +Suppose you have a control plane with nodes A, B, and C. + +The RPM installer creates a strongly connected peering between the control plane nodes with a least-privilege approach and opens the TCP listener only on those nodes where it is required. All the receptor connections are bidirectional, so once the connection is created, the receptor can communicate in both directions. + +The following is an example peering setup for three controller nodes: + +Controller node A --> Controller node B + +Controller node A --> Controller node C + +Controller node B --> Controller node C + +You can force the listener by setting + +`receptor_listener=True` + +However, a connection Controller B --> A is likely to be rejected as that connection already exists. + +This means that nothing connects to Controller A as Controller A is creating the connections to the other nodes, and the following command does not return anything on Controller A: + +`[root@controller1 ~]# ss -ntlp | grep 27199` +==== . Run the following playbook on the machine where you want to update your automation mesh: + ---- ansible-playbook -i inventory.yml install_receptor.yml ---- -+ +[NOTE] +==== +OpenSSL is required for this playbook. You can check whether it is already installed by running the following command: +---- +openssl version +---- +If the command returns a version, OpenSSL is installed. Otherwise, you need to install OpenSSL with: +---- +sudo dnf install -y openssl +---- +==== + After this playbook runs, your automation mesh is configured. + image::instances_list_view2.png[Instances list view] ifdef::operator-mesh[] To remove an instance from the mesh, see xref:ref-removing-instances[Removing instances]. endif::operator-mesh[] -ifdef::controller-AG[] -To remove an instance from the mesh, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_automation_mesh_for_operator-based_installations/assembly-automation-mesh-operator-aap#ref-removing-instances[Removing instances]. -endif::controller-AG[] +ifdef::controller-UG[] +To remove an instance from the mesh, see xref:ref-removing-instances[Removing instances]. +endif::controller-UG[] diff --git a/downstream/modules/platform/proc-deploy-eda-controller-with-aap-operator-ocp.adoc b/downstream/modules/platform/proc-deploy-eda-controller-with-aap-operator-ocp.adoc index 1a75943d32..99e066b825 100644 --- a/downstream/modules/platform/proc-deploy-eda-controller-with-aap-operator-ocp.adoc +++ b/downstream/modules/platform/proc-deploy-eda-controller-with-aap-operator-ocp.adoc @@ -5,7 +5,7 @@ .Prerequisites -* You have installed {OperatorPlatform} on {OCPShort}. +* You have installed {OperatorPlatformNameShort} on {OCPShort}. * You have installed and configured {ControllerName}. .Procedure @@ -14,9 +14,22 @@ . Locate and select your installation of {PlatformNameShort}. -. Under *Provided APIs*, locate the {EDAName} modal and click *Create instance*. +. Under the *Details* tab, locate the *EDA* modal and click *Create instance*. + +. Click btn:[Form view], and in the *Name* field, enter the name you want for your new {EDAcontroller} deployment.
+ -This takes you to the Form View to customize your installation. +[IMPORTANT] +==== +If you have installed other {PlatformNameShort} components in your current {OCPShort} namespace, ensure that you provide a unique name for your {EDAcontroller} when you create your {EDAName} custom resource. Otherwise, naming conflicts can occur and impact {EDAcontroller} deployment. +==== +. Specify your controller URL in the *Automation Server URL* field. ++ +If you deployed {ControllerName} in OpenShift as well, you can find the URL in the navigation panel under menu:Networking[Routes]. ++ +[NOTE] +==== +This is the only required customization, but you can customize other options using the UI form or directly in the YAML configuration tab, if desired. +==== + [IMPORTANT] ==== @@ -35,34 +48,16 @@ extra_settings: value: '12' ---- + -. Click btn:[Reload] and btn:[Save]. Return to the *Form* view. - -. In the *Name* field, enter the name you want for your new {EDAcontroller} deployment. -+ -[IMPORTANT] -==== -If you have other {PlatformNameShort} components installed in your current {OCPShort} namespace, ensure that you provide a unique name for your {EDAcontroller} when you create your {EDAName} custom resource. Otherwise, naming conflicts can occur and impact {EDAcontroller} deployment. -==== -+ -. Specify your controller URL. -+ -If you deployed {ControllerName} in Openshift as well, you can find the URL in the navigation panel under menu:Networking[Routes]. -+ -[NOTE] -==== -This is the only required customization, but you can customize other options using the UI form or directly in the YAML configuration tab, if desired. -==== - . Click btn:[Create]. This deploys {EDAcontroller} in the namespace you specified. + -After a couple minutes when the installation is marked as *Successful*, you can find the URL for the {EDAName} UI on the *Routes* page in the Openshift UI. +After a couple of minutes, when the installation is marked as *Successful*, you can find the URL for the {EDAName} UI on the *Routes* page in the OpenShift UI. . From the navigation panel, select menu:Networking[Routes] to find the new Route URL that has been created for you. + Routes are listed according to the name of your custom resource. -. Click the new URL to navigate to {EDAName} in the browser. +. Click the new URL under the *Location* column to navigate to {EDAName} in the browser. . From the navigation panel, select menu:Workloads[Secrets] and locate the Admin Password k8s secret that was created for you, unless you specified a custom one. + diff --git a/downstream/modules/platform/proc-deprovision-group.adoc b/downstream/modules/platform/proc-deprovision-group.adoc index 44753741ae..401698c006 100644 --- a/downstream/modules/platform/proc-deprovision-group.adoc +++ b/downstream/modules/platform/proc-deprovision-group.adoc @@ -16,7 +16,7 @@ You can deprovision any hosts in your inventory except for the first host specif .Procedure -* Add `*node_state=deprovision*` to the [group:vars] associated with the group you wish to deprovision. +* Add `*node_state=deprovision*` to the [group:vars] associated with the group you want to deprovision. .Example diff --git a/downstream/modules/platform/proc-determine-hub-route.adoc b/downstream/modules/platform/proc-determine-hub-route.adoc index f1ad4e5b19..9d1257d4ec 100644 --- a/downstream/modules/platform/proc-determine-hub-route.adoc +++ b/downstream/modules/platform/proc-determine-hub-route.adoc @@ -6,6 +6,7 @@ Use the following procedure to determine the hub route. .Procedure +.
 . Navigate to menu:Networking[Routes].
 . Select the project you used for the install.
 . Copy the location of the `private-ah-web-svc` service.
diff --git a/downstream/modules/platform/proc-downloading-containerized-aap.adoc b/downstream/modules/platform/proc-downloading-containerized-aap.adoc
index 9b8451c9fe..51b59c34bd 100644
--- a/downstream/modules/platform/proc-downloading-containerized-aap.adoc
+++ b/downstream/modules/platform/proc-downloading-containerized-aap.adoc
@@ -1,30 +1,32 @@
 :_mod-docs-content-type: PROCEDURE
-[id="downloading-containerizzed-aap_{context}"]
+[id="downloading-containerized-aap_{context}"]
 
 = Downloading {PlatformNameShort}
 
-[role="_abstract"]
+Choose the installation program you need based on your {RHEL} environment's internet connectivity, and download the installation program to your {RHEL} host.
 
 .Procedure
 
-. Download the latest installer tarball from link:https://access.redhat.com/downloads/content/480/ver=2.4/rhel---9/2.4/x86_64/product-software[access.redhat.com]. This can be done directly within the RHEL host, which saves time.
+. Download the latest installer .tar file from the link:{PlatformDownloadUrl}[{PlatformNameShort} download page].
+.. For online installations: *{PlatformNameShort} {PlatformVers} Containerized Setup*
+.. For offline or bundled installations: *{PlatformNameShort} {PlatformVers} Containerized Setup Bundle*
 
-. If you have downloaded the tarball and optional manifest zip file onto your laptop, copy them onto your RHEL host.
-+
-Decide where you would like the installer to reside on the filesystem. Installation related files will be created under this location and require at least 10Gb for the initial installation.
-+
-. Unpack the installer tarball into your installation directory, and cd into the unpacked directory.
+. Copy the installation program .tar file and the optional manifest .zip file onto your {RHEL} host.
+
+. Decide where you want the installation program to reside on the file system. Installation-related files are created under this location and require at least 10 GB for the initial installation.
+
+. Unpack the installation program .tar file into your installation directory, and go to the unpacked directory.
 +
-.. online installer
+.. To unpack the online installer:
 +
 ----
-$ tar xfvz ansible-automation-platform-containerized-setup-2.4-2.tar.gz
+$ tar xfvz ansible-automation-platform-containerized-setup-.tar.gz
 ----
 +
-.. bundled installer
+.. To unpack the offline or bundled installer:
 +
 ----
-$ tar xfvz ansible-automation-platform-containerized-setup-bundle-2.4-2-.tar.gz
+$ tar xfvz ansible-automation-platform-containerized-setup-bundle--.tar.gz
 ----
diff --git a/downstream/modules/platform/proc-edge-manager-access-devices-cli.adoc b/downstream/modules/platform/proc-edge-manager-access-devices-cli.adoc
new file mode 100644
index 0000000000..7f84e7cdc2
--- /dev/null
+++ b/downstream/modules/platform/proc-edge-manager-access-devices-cli.adoc
@@ -0,0 +1,17 @@
+[id="edge-manager-access-devices-cli"]
+
+= Accessing devices on the CLI
+
+Access and manage devices directly through the CLI, enabling you to perform tasks remotely and efficiently.
+
+.Procedure
+
+* To connect, use the `flightctl console` command, specifying the device's name. The agent establishes the console connection the next time it calls home (pull mode) or immediately (push mode):
+
+[source,console]
+----
+flightctl console
+----
+
+* To disconnect, enter `exit` on the console.
+To force-disconnect, press `+b` three times.
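+
+For example, you can open a console to a device and then end the session. This is a minimal sketch that assumes a device named `my-device` and the same `device/<name>` addressing that the `flightctl get` command uses elsewhere in this guide; substitute your own device name:
+
+[source,console]
+----
+flightctl console device/my-device
+exit
+----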
diff --git a/downstream/modules/platform/proc-edge-manager-access-devices-web-ui.adoc b/downstream/modules/platform/proc-edge-manager-access-devices-web-ui.adoc
new file mode 100644
index 0000000000..3e85da530b
--- /dev/null
+++ b/downstream/modules/platform/proc-edge-manager-access-devices-web-ui.adoc
@@ -0,0 +1,5 @@
+[id="edge-manager-access-devices-web-ui"]
+
+= Accessing devices on the web UI
+
+
diff --git a/downstream/modules/platform/proc-edge-manager-create-apps.adoc b/downstream/modules/platform/proc-edge-manager-create-apps.adoc
new file mode 100644
index 0000000000..d66870f10a
--- /dev/null
+++ b/downstream/modules/platform/proc-edge-manager-create-apps.adoc
@@ -0,0 +1,22 @@
+[id="edge-manager-create-apps"]
+
+= Creating applications
+
+You can create an application package for an Open Container Initiative (OCI) registry with the following steps.
+
+.Procedure
+
+. Define the application's functionality with the link:https://github.com/compose-spec/compose-spec/blob/main/spec.md[Compose specification].
+. Embed the compose file in a scratch container.
+. Add the `appType=compose` label, then build and push the container to your OCI registry.
+. Reference the image in `spec.applications[]`.
++
+[source,dockerfile]
+----
+FROM scratch
+
+COPY podman-compose.yaml /podman-compose.yaml
+
+# required
+LABEL appType="compose"
+----
diff --git a/downstream/modules/platform/proc-edge-manager-generate-device-log.adoc b/downstream/modules/platform/proc-edge-manager-generate-device-log.adoc
new file mode 100644
index 0000000000..10b28e3d80
--- /dev/null
+++ b/downstream/modules/platform/proc-edge-manager-generate-device-log.adoc
@@ -0,0 +1,20 @@
+[id="edge-manager-generate-device-log"]
+
+= Generating a device log bundle
+
+The device includes a script that generates a bundle of logs necessary to debug the agent.
+
+.Procedure
+
+* Run the following command on the device and include the .tar file in the bug report.
++
+[NOTE]
+====
+This depends on an SSH connection to extract the .tar file.
+====
++
+[literal, options="nowrap" subs="+attributes"]
+----
+sudo flightctl-must-gather
+----
+
diff --git a/downstream/modules/platform/proc-edge-manager-manage-apps-cli.adoc b/downstream/modules/platform/proc-edge-manager-manage-apps-cli.adoc
new file mode 100644
index 0000000000..6d682f3a20
--- /dev/null
+++ b/downstream/modules/platform/proc-edge-manager-manage-apps-cli.adoc
@@ -0,0 +1,76 @@
+[id="edge-manager-manage-apps-cli"]
+
+= Managing applications on the CLI
+
+Managing applications through the Command Line Interface (CLI) allows for efficient and streamlined control.
+
+.Procedure
+
+* To deploy an application package from an OCI registry, specify it in the device's `spec.applications[]` as follows:
++
+[source,yaml]
+----
+apiVersion: v1alpha1
+kind: Device
+metadata:
+  name: some_device_name
+spec:
+[...]
+  applications:
+  - name: wordpress
+    image: quay.io/flightctl-demos/wordpress-app:latest
+    envVars:
+      WORDPRESS_DB_HOST: "mysql"
+      WORDPRESS_DB_USER: "user"
+      WORDPRESS_DB_PASSWORD: "password"
+[...]
+----
+
+* To deploy an unpackaged application from a Git repository, specify it in the device's `spec.applications[]` as follows:
++
+[source,yaml]
+----
+apiVersion: v1alpha1
+kind: Device
+metadata:
+  name: some_device_name
+spec:
+[...]
+  applications:
+  - name: wordpress
+    git:
+      url: https://github.com/flightctl/flightctl-demos.git
+      revision: v1.0
+      path: /wordpress
+    envVars:
+      WORDPRESS_DB_HOST: "mysql"
+      WORDPRESS_DB_USER: "user"
+      WORDPRESS_DB_PASSWORD: "password"
+[...]
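+# Note: the fields above are illustrative of the pattern rather than a
+# complete specification: 'url' points at the Git repository, 'revision'
+# pins the ref to deploy, 'path' selects the application directory inside
+# the repository, and 'envVars' values are passed to the deployment tool
+# as environment variables.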
+----
+
+* To deploy an unpackaged application inline with the device specification, specify it in the device's `spec.applications[]` as follows:
++
+[source,yaml]
+----
+apiVersion: v1alpha1
+kind: Device
+metadata:
+  name: some_device_name
+spec:
+[...]
+  applications:
+  - name: wordpress
+    inline:
+      podman-compose.yaml: |
+        version: "3.7"
+        services:
+          wordpress:
+            image: "wordpress:latest"
+            [...]
+    envVars:
+      WORDPRESS_DB_HOST: "mysql"
+      WORDPRESS_DB_USER: "user"
+      WORDPRESS_DB_PASSWORD: "password"
+[...]
+----
diff --git a/downstream/modules/platform/proc-edge-manager-manage-apps-ui.adoc b/downstream/modules/platform/proc-edge-manager-manage-apps-ui.adoc
new file mode 100644
index 0000000000..ecf42d6d11
--- /dev/null
+++ b/downstream/modules/platform/proc-edge-manager-manage-apps-ui.adoc
@@ -0,0 +1,5 @@
+[id="edge-manager-manage-apps-ui"]
+
+= Managing applications on the web UI
+
+
diff --git a/downstream/modules/platform/proc-edge-manager-manage-apps.adoc b/downstream/modules/platform/proc-edge-manager-manage-apps.adoc
new file mode 100644
index 0000000000..87922e480a
--- /dev/null
+++ b/downstream/modules/platform/proc-edge-manager-manage-apps.adoc
@@ -0,0 +1,60 @@
+[id="edge-manager-manage-apps"]
+
+= Managing applications
+
+You can deploy, update, or remove applications on a device by updating the list of applications in the device's specification.
+The next time the agent checks in, it learns of the change in the specification and downloads any new or updated application packages and images from an OCI-compatible registry.
+It then deploys them to the appropriate application runtime or removes them from that runtime.
+
+The following table shows the application runtimes and formats supported by {RedHatEdge}:
+
+[width="100%",cols="20%,20%,20%,20%,20%",options="header",]
+|===
+|Runtime |Descriptor Format |Package Format |Package Repository |Note
+
+|Podman |https://github.com/containers/podman-compose[podman-compose] |(name TBD) |OCI registry |requires `podman-compose` installed on device
+
+|Podman |https://github.com/containers/podman-compose[podman-compose] |(unpackaged) |git or inline |requires `podman-compose` installed on device
+
+|Podman |https://docs.podman.io/en/stable/markdown/podman-systemd.unit.5.html[Quadlet] |(name TBD) |OCI registry |
+
+|Podman
+|https://docs.podman.io/en/stable/markdown/podman-systemd.unit.5.html[Quadlet]
+|(unpackaged) |git or inline |
+
+|MicroShift |Kubernetes manifests from https://helm.sh/docs/helm/helm_template/[Helm templates] |Helm Chart
+|OCI registry |requires `helm` installed on device
+
+|MicroShift |Kubernetes manifests from https://kustomize.io/[kustomize]
+|(unpackaged) |git or inline |
+|===
+
+.Procedure
+
+* To deploy an application to a device, create a new entry in the "applications" section of the device's specification, specifying the following parameters:
++
+[width="100%",cols="45%,55%",options="header",]
+|===
+|Parameter |Description
+
+|Name |A user-defined name for the application. This is used when the web UI and CLI list applications.
+
+|Image |A reference to an application package in an OCI registry.
+
+|EnvVars |(Optional) A list of key/value-pairs that are passed to the deployment tool as environment variables or command line flags.
+|===
++
+For each application in the "applications" section of the device's specification, there is corresponding status information in the device's status that includes the following fields:
++
+[width="100%",cols="48%,52%",options="header",]
+|===
+|Status Field |Description
+
+|Preparing |Application deployed; containers initialized but not yet running.
+
+|Starting |Application started; at least one container running, awaiting results.
+
+|Running |All containers are running.
+
+|Error |All containers failed.
+
+|Unknown |Application started, no containers observed.
+
+|Completed |All containers have completed successfully.
+|===
diff --git a/downstream/modules/platform/proc-edge-manager-monitor-device-resources-cli.adoc b/downstream/modules/platform/proc-edge-manager-monitor-device-resources-cli.adoc
new file mode 100644
index 0000000000..5769e23347
--- /dev/null
+++ b/downstream/modules/platform/proc-edge-manager-monitor-device-resources-cli.adoc
@@ -0,0 +1,38 @@
+[id="edge-manager-monitor-device-resources-cli"]
+
+= Monitoring device resources on the CLI
+
+Effectively monitor the resources of your device through the CLI, providing you with the tools and commands to track performance and troubleshoot issues.
+
+.Procedure
+
+* To check resource use, add resource monitors in the `resources:` section of the device's specification.
+
+.Example
+
+Check disk use on the file system associated with the path `/application_data`.
+With a sampling interval of 5 seconds, this triggers a warning alert if the average use exceeds 75% for more than 30 minutes, and a critical alert if it exceeds 90% for more than 10 minutes.
+
+[source,yaml]
+----
+apiVersion: v1alpha1
+kind: Device
+metadata:
+  name: some_device_name
+spec:
+[...]
+  resources:
+  - monitorType: Disk
+    samplingInterval: 5s
+    path: /application_data
+    alertRules:
+    - severity: Warning
+      duration: 30m
+      percentage: 75
+      description: Disk space for application data is >75% full for over 30m.
+    - severity: Critical
+      duration: 10m
+      percentage: 90
+      description: Disk space for application data is >90% full over 10m.
+[...]
+----
diff --git a/downstream/modules/platform/proc-edge-manager-monitor-device-resources-web-ui.adoc b/downstream/modules/platform/proc-edge-manager-monitor-device-resources-web-ui.adoc
new file mode 100644
index 0000000000..483b66f5f5
--- /dev/null
+++ b/downstream/modules/platform/proc-edge-manager-monitor-device-resources-web-ui.adoc
@@ -0,0 +1,5 @@
+[id="edge-manager-monitor-device-resources-web-ui"]
+
+= Monitoring device resources on the web UI
+
+
diff --git a/downstream/modules/platform/proc-edge-manager-view-device-config.adoc b/downstream/modules/platform/proc-edge-manager-view-device-config.adoc
new file mode 100644
index 0000000000..b4d5f9d64f
--- /dev/null
+++ b/downstream/modules/platform/proc-edge-manager-view-device-config.adoc
@@ -0,0 +1,17 @@
+[id="edge-manager-view-device-config"]
+
+= Viewing a device's effective target configuration
+
+The device manifest returned by the `flightctl get device` command still only has references to external configuration and secret objects.
+Only when the device agent queries the service does the service replace the references with the actual configuration and secret data.
+While this better protects potentially sensitive data, it also makes troubleshooting faulty configurations harder.
+For this reason, a user can be authorized to query the effective configuration as rendered by the service to the agent, as shown in the procedure below.
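+
+For example, an unrendered manifest might contain only a reference to a secret object, while the rendered view contains the resolved content. The field names in this sketch are illustrative, not the exact schema:
+
+[source,yaml]
+----
+# Unrendered: the specification references a secret object by name
+configs:
+- secretRef:
+    name: device-credentials
+
+# Rendered: the service has resolved the reference into actual data
+configs:
+- inline:
+    path: /etc/app/credentials
+    content: <resolved secret data>
+----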
+ +.Procedure + +* To query the effective configuration, use the following command: ++ +[literal, options="nowrap" subs="+attributes"] +---- +flightctl get device/${device_name} --rendered | jq +---- diff --git a/downstream/modules/platform/proc-editing-inventory-file-for-updates.adoc b/downstream/modules/platform/proc-editing-inventory-file-for-updates.adoc index b5803ea2be..ed0a1df90d 100644 --- a/downstream/modules/platform/proc-editing-inventory-file-for-updates.adoc +++ b/downstream/modules/platform/proc-editing-inventory-file-for-updates.adoc @@ -5,6 +5,8 @@ Before upgrading your {PlatformName} installation, edit the `inventory` file so that it matches your desired configuration. You can keep the same parameters from your existing {PlatformNameShort} deployment or you can modify the parameters to match any changes to your environment. +You can find sample inventory files in the link:https://github.com/ansible/test-topologies/[Test topologies] GitHub repository, or in our link:{LinkTopologies} guide. + .Procedure . Navigate to the installation program directory. @@ -12,24 +14,24 @@ Bundled installer:: + [source,options="nowrap",subs=attributes+] ----- -$ cd ansible-automation-platform-setup-bundle-2.4-1-x86_64 +$ cd ansible-automation-platform-setup-bundle-2.5-4-x86_64 ----- + Online installer:: + [source,options="nowrap",subs=attributes+] ----- -$ cd ansible-automation-platform-setup-2.4-1 +$ cd ansible-automation-platform-setup-2.5-4 ----- . Open the `inventory` file for editing. . Modify the `inventory` file to provision new nodes, deprovision nodes or groups, and import or generate {HubName} API tokens. + -You can use the same `inventory` file from an existing {PlatformNameShort} 2.1 installation if there are no changes to the environment. +You can use the same `inventory` file from an existing {PlatformNameShort} installation if there are no changes to the environment. + [NOTE] ==== -Provide a reachable IP address or fully qualified domain name (FQDN) for the `[automationhub]` and `[automationcontroller]` hosts to ensure that users can synchronize and install content from {HubNameMain} from a different node. +Provide a reachable IP address or fully qualified domain name (FQDN) for all hosts to ensure that users can synchronize and install content from {HubNameMain} from a different node. Do not use `localhost`. If `localhost` is used, the upgrade will be stopped as part of preflight checks. ==== @@ -42,30 +44,3 @@ If `localhost` is used, the upgrade will be stopped as part of preflight checks. ---- include::ini/clustered-nodes.ini[] ---- - -.Deprovisioning nodes or groups in a cluster - -* Append `node_state-deprovision` to the node or group within the `inventory` file. - -.Importing and generating API tokens - -When upgrading from {PlatformName} 2.0 or earlier to {PlatformName} 2.1 or later, you can use your existing {HubName} API token or generate a new token. 
In the inventory file, edit one of the following fields before running the {PlatformName} installer setup script `setup.sh`:
-
-* Import an existing API token with the `automationhub_api_token` flag as follows:
-+
-[options="nowrap",subs="+quotes"]
-----
-automationhub_api_token=____
-----
-
-* Generate a new API token, and invalidate any existing tokens, with the `generate_automationhub_token` flag as follows:
-+
-[options="nowrap",subs="+quotes"]
-----
-generate_automationhub_token=True
-----
-
-[role="_additional-resources"]
-.Additional resources
-* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/index[{PlatformName} Installation Guide]
-* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_automation_mesh_guide_for_vm-based_installations/assembly-deprovisioning-mesh[Deprovisioning individual nodes or instance groups]
diff --git a/downstream/modules/platform/proc-editing-inventory-file.adoc b/downstream/modules/platform/proc-editing-inventory-file.adoc
index 4c59f9d082..6204167d23 100644
--- a/downstream/modules/platform/proc-editing-inventory-file.adoc
+++ b/downstream/modules/platform/proc-editing-inventory-file.adoc
@@ -29,8 +29,15 @@
 $ cd ansible-automation-platform-setup-
 -----
 +
 . Open the `inventory` file with a text editor.
-. Edit `inventory` file parameters to specify your installation scenario. You can use one of the supported xref:con-install-scenario-examples[Installation scenario examples] as the basis for your `inventory` file.
+. Edit `inventory` file parameters to specify your installation scenario.
+ifdef::mesh-VM[]
+For further information, see link:{URLInstallationGuide}/assembly-platform-install-scenario#proc-editing-installer-inventory-file_platform-install-scenario[Editing the {PlatformName} installer inventory file].
+endif::mesh-VM[]
+ifdef::aap-install[]
+You can use one of the supported xref:con-install-scenario-examples[Installation scenario examples] as the basis for your `inventory` file.
 
 [role="_additional-resources"]
 .Additional resources
 * For a comprehensive list of pre-defined variables used in Ansible installation inventory files, see xref:appendix-inventory-files-vars[Inventory file variables].
+endif::aap-install[]
+
diff --git a/downstream/modules/platform/proc-enable-hstore-extension.adoc b/downstream/modules/platform/proc-enable-hstore-extension.adoc
index e5552cf33a..9ba157152a 100644
--- a/downstream/modules/platform/proc-enable-hstore-extension.adoc
+++ b/downstream/modules/platform/proc-enable-hstore-extension.adoc
@@ -2,13 +2,13 @@
 
 = Enabling the hstore extension for the {HubName} PostgreSQL database
 
-From {PlatformNameShort} {PlatformVers}, the database migration script uses `hstore` fields to store information, therefore the `hstore` extension to the {HubName} PostgreSQL database must be enabled.
+In {PlatformNameShort} {PlatformVers} and later, the database migration script uses `hstore` fields to store information, so the `hstore` extension must be enabled in the {HubName} PostgreSQL database.
 
 This process is automatic when using the {PlatformNameShort} installer and a managed PostgreSQL server.
 
-If the PostgreSQL database is external, you must enable the `hstore` extension to the {HubName} PostreSQL database manually before {HubName} installation.
+If the PostgreSQL database is external, you must enable the `hstore` extension in the {HubName} PostgreSQL database manually before installation.
-If the `hstore` extension is not enabled before {HubName} installation, a failure is raised during database migration.
+If the `hstore` extension is not enabled before installation, database migration fails.
 
 .Procedure
 
 . Check if the extension is available on the PostgreSQL server ({HubName} database).
 +
 [options="nowrap" subs="+quotes,attributes"]
 ----
 $ psql -d <{HubName} database> -c "SELECT * FROM pg_available_extensions WHERE name='hstore'"
 ----
 +
-Where the default value for `<{HubName} database>` is `automationhub`.
+Where the default value of `<{HubName} database>` is `automationhub`.
 +
 *Example output with `hstore` available*:
@@ -49,20 +49,14 @@
 To install the RPM package, use the following command:
 ----
 dnf install postgresql-contrib
 ----
-. Create the `hstore` PostgreSQL extension on the {HubName} database with the following command:
+. Load the `hstore` PostgreSQL extension into the {HubName} database with the following command:
 +
 [options="nowrap" subs="+quotes,attributes"]
 ----
 $ psql -d <{HubName} database> -c "CREATE EXTENSION hstore;"
 ----
 +
-The output of which is:
-+
-[options="nowrap" subs="+quotes,attributes"]
-----
-CREATE EXTENSION
-----
-. In the following output, the `installed_version` field contains the `hstore` extension used, indicating that `hstore` is enabled.
+In the following output, the `installed_version` field lists the `hstore` extension used, indicating that `hstore` is enabled.
 +
 [options="nowrap" subs="+quotes,attributes"]
 ----
diff --git a/downstream/modules/platform/proc-enable-proxy-support.adoc b/downstream/modules/platform/proc-enable-proxy-support.adoc
index 720871fb2a..412899e42a 100644
--- a/downstream/modules/platform/proc-enable-proxy-support.adoc
+++ b/downstream/modules/platform/proc-enable-proxy-support.adoc
@@ -2,16 +2,15 @@
 [id="proc-enable-proxy-support_{context}"]
 
 = Enable proxy support
-
+//FYI - In 2.5 EA, the System menu is specific to controller so do not change to AAP.
 To provide proxy server support, {ControllerName} handles proxied requests (such as ALB, NLB, HAProxy, Squid, Nginx and tinyproxy in front of {ControllerName}) via the *REMOTE_HOST_HEADERS* list variable in the {ControllerName} settings. By default, *REMOTE_HOST_HEADERS* is set to `["REMOTE_ADDR", "REMOTE_HOST"]`.
 
 To enable proxy server support, edit the *REMOTE_HOST_HEADERS* field in the settings page for your {ControllerName}:
 
 .Procedure
-. On your {ControllerName}, navigate to {MenuAEAdminSettings}.
-. Select *Miscellaneous System settings* from the list of *System* options.
-. In the *REMOTE_HOST_HEADERS* field, enter the following values:
+. From the navigation panel, select {MenuSetSystem}.
+. In the *Remote Host Headers* field, enter the following values:
 +
 ----
 [
@@ -21,4 +20,4 @@
 ]
 ----
 
-{ControllerNameStart} determines the remote host’s IP address by searching through the list of headers in *REMOTE_HOST_HEADERS* until the first IP address is located.
+{ControllerNameStart} determines the remote host’s IP address by searching through the list of headers in *Remote Host Headers* until the first IP address is located.
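+
+For example, a reverse proxy in front of {ControllerName} must forward the original client address in one of the headers you list, commonly `X-Forwarded-For`. The following is a minimal NGINX sketch of that forwarding; the server name and upstream address are placeholders, not values from your environment:
+
+----
+server {
+    listen 443 ssl;
+    server_name controller.example.com;
+
+    location / {
+        # Forward the original client IP so that the controller can
+        # resolve it through the configured header list.
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header Host $host;
+        proxy_pass https://192.0.2.10;
+    }
+}
+----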
diff --git a/downstream/modules/platform/proc-gs-add-ee-to-job-template.adoc b/downstream/modules/platform/proc-gs-add-ee-to-job-template.adoc
new file mode 100644
index 0000000000..94d1cd48c3
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-add-ee-to-job-template.adoc
@@ -0,0 +1,41 @@
+[id="proc-gs-add-ee-to-job-template_{context}"]
+
+= Adding an {ExecEnvShort} to a job template
+
+.Prerequisites
+
+* An {ExecEnvShort} must have been created using ansible-builder as described in link:{URLBuilder}/assembly-using-builder[Using {Builder}].
+When an {ExecEnvShort} has been created, you can use it to run jobs.
+Use the {ControllerName} UI to specify the execution environment to use in your job templates.
+* Depending on whether an {ExecEnvShort} is made available for global use or tied to an organization, you must have the appropriate level of administrator privileges to use an {ExecEnvShort} in a job.
+To run jobs with {ExecEnvShort}s that are tied to an organization, you must be an organization administrator.
+* Before running a job or job template that uses an {ExecEnvShort} that has a credential assigned to it, ensure that the credential has a username, host, and password.
+
+.Procedure
+
+. From the navigation panel, select {MenuInfrastructureExecEnvironments}.
+. Click btn:[Create execution environment] to create an {ExecEnvShort}.
+. Enter the appropriate details into the following fields:
+.. *Name* (required): Enter a name for the {ExecEnvShort}.
+.. *Image* (required): Enter the image name. The image name requires its full location, in the format `repo/project/image-name:tag`, including the registry, image name, and version tag, as in the following example: `quay.io/ansible/awx-ee:latest`
+.. Optional: *Pull*: Choose the type of pull when running jobs:
+... *Always pull container before running*: Pulls the latest image file for the container.
+... *Only pull the image if not present before running*: Only pulls the latest image if none are specified.
+... *Never pull container before running*: Never pulls the latest version of the container image.
++
+NOTE: If you do not set a type for pull, the value defaults to *Only pull the image if not present before running*.
++
+.. Optional: *Description*: Enter an optional description.
+.. Optional: *Organization*: Assign the organization to specifically use this {ExecEnvShort}. To make the {ExecEnvShort} available for use across multiple organizations, leave this field blank.
+.. *Registry credential*: If the image has a protected container registry, provide the credential to access it.
+. Click btn:[Create {ExecEnvShort}]. Your newly added {ExecEnvShort} is ready to be used in a job template.
+. To add an {ExecEnvShort} to a job template, navigate to {MenuAETemplates} and select your template.
+.. Click btn:[Edit template] and specify your {ExecEnvShort} in the field labeled *{ExecEnvShort}*.
+
+When you have added an {ExecEnvShort} to a job template, the template is listed in the *Templates* tab in your {ExecEnvShort} details.
+
+== About container registries
+
+If you have many {ExecEnvShort}s that you want to maintain, you can store them in a container registry linked to your {PrivateHubName}.
+
+For more information, see link:{URLBuilder}/populate-container-registry[Populating your private automation hub container registry] from the {TitleBuilder} guide.
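+
+Before adding an image, you can optionally confirm from a command line that the image reference resolves and that the tag exists. The following is a minimal sketch using Podman with the example image reference above; substitute your own registry, image name, and tag:
+
+----
+# Log in first if the registry is protected
+podman login quay.io
+
+# Pull the image to confirm that the reference and tag exist
+podman pull quay.io/ansible/awx-ee:latest
+----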
diff --git a/downstream/modules/platform/proc-gs-auto-dev-create-automation-decision-proj.adoc b/downstream/modules/platform/proc-gs-auto-dev-create-automation-decision-proj.adoc
new file mode 100644
index 0000000000..83efbc3524
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-auto-dev-create-automation-decision-proj.adoc
@@ -0,0 +1,34 @@
+[id="proc-gs-auto-dev-create-automation-decision-proj"]
+
+= Creating an automation decision project
+
+Like automation execution projects, automation decision projects are logical collections of automation decision content.
+You can use the project function to organize your automation decision content in a way that makes sense to you.
+
+.Prerequisites
+
+* You have set up any necessary credentials.
+For more information, see the link:{URLEDAUserGuide}/eda-credentials#eda-set-up-credential[Setting up credentials] section of the {TitleEDAUserGuide} guide.
+* You have an existing repository containing rulebooks that are integrated with playbooks contained in a repository to be used by {ControllerName}.
+
+.Procedure
+
+. From the navigation panel, select *{MenuADProjects}*.
+. Click btn:[Create project].
+. Enter the following information:
+* *Name*: Enter the project name.
+* *Description*: This field is optional.
+* *Organization*: Select the organization associated with the project.
+* *Source control type*: Git is the only SCM type available for use.
+* *Proxy*: The proxy used to access HTTP or HTTPS servers.
+* *Source control branch/tag/commit*: The branch to check out. This can also be a tag, commit hash, or arbitrary ref.
+* *Source control refspec*: A refspec to fetch. This parameter allows access to references that are not otherwise available through the branch field.
+* Optional: *Source control credential*: The token needed to use the source control URL.
+* *Content signature validation credential*: Enables content signing to verify that the content has remained secure during project syncing. If the content is tampered with, the job does not run.
+* *Options*: Checking the box next to *Verify SSL* verifies the SSL certificate when the project is imported over HTTPS.
+. Click btn:[Create project].
+
+Your project is now created and can be managed in the *Projects* screen.
+
+After saving the new project, the project's details page is displayed.
+From there or the *Projects* list view, you can edit or delete it.
diff --git a/downstream/modules/platform/proc-gs-auto-dev-create-automation-execution-proj.adoc b/downstream/modules/platform/proc-gs-auto-dev-create-automation-execution-proj.adoc
new file mode 100644
index 0000000000..65a074ea63
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-auto-dev-create-automation-execution-proj.adoc
@@ -0,0 +1,29 @@
+[id="proc-gs-auto-dev-create-automation-execution-proj"]
+
+= Creating an automation execution project
+
+A project is a logical collection of playbooks.
+Projects are useful as a way to group your automation content according to the organizing principle of your choice.
+
+You can set up an automation execution project in the platform UI.
+
+.Procedure
+
+. From the navigation panel, select {MenuAEProjects}.
+. On the *Projects* page, click btn:[Create project] to launch the *Create Project* window.
+. Enter the appropriate details into the following fields:
+
+* *Name* (required)
+* Optional: *Description*
+* *Organization* (required): A project must have at least one organization. Select one organization now to create the project. When the project is created, you can add additional organizations.
+* Optional: *Execution Environment*: Enter the name of the {ExecEnvShort} or search from a list of existing ones to run this project.
+* *Source Control Type* (required): Select an SCM type associated with this project from the menu.
+Options in the following sections become available depending on the type chosen.
+For more information, see link:{URLControllerUserGuide}/controller-projects#proc-controller-adding-a-project[Managing playbooks manually] or link:{URLControllerUserGuide}/controller-projects#ref-projects-manage-playbooks-with-source-control[Managing playbooks using source control].
+* Optional: *Content Signature Validation Credential*: Use this field to enable content verification.
+Specify the GPG key to use for validating content signature during project synchronization.
+If the content has been tampered with, the job does not run.
+For more information, see link:{URLControllerUserGuide}/assembly-controller-project-signing[Project signing and verification].
++
+. Click btn:[Create project].
+
diff --git a/downstream/modules/platform/proc-gs-auto-dev-create-template.adoc b/downstream/modules/platform/proc-gs-auto-dev-create-template.adoc
new file mode 100644
index 0000000000..cb002c477f
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-auto-dev-create-template.adoc
@@ -0,0 +1,177 @@
+[id="proc-gs-auto-dev-create-template"]
+
+= Creating a job template
+
+.Procedure
+
+. From the navigation panel, select {MenuAETemplates}.
+. On the *Templates* page, select *Create job template* from the *Create template* list.
+. Enter the appropriate details in the following fields:
++
+[NOTE]
+====
+If a field has the *Prompt on launch* checkbox selected, you are prompted for that field's value when you launch the job.
+
+Most prompted values override any values set in the job template.
+
+Exceptions are noted in the following table.
+====
++
+[cols="33%,33%,33%",options="header"]
+|===
+| *Field* | *Options* | *Prompt on Launch*
+| Name | Enter a name for the job.| N/A
+| Description| Enter an arbitrary description as appropriate (optional). | N/A
+| Job Type a| Choose a job type:
+
+- Run: Start the playbook when launched, running Ansible tasks on the selected hosts.
+
+- Check: Perform a "dry run" of the playbook and report changes that would be made without actually making them.
+Tasks that do not support check mode are skipped and do not report potential changes.
+
+For more information about job types, see the link:https://docs.ansible.com/ansible/latest/playbook_guide/index.html[Playbooks] section of the Ansible documentation.| Yes
+| Inventory | Choose the inventory to use with this job template from the inventories available to the logged-in user.
+
+A System Administrator must grant you or your team permissions to be able to use certain inventories in a job template. | Yes.
+
+The inventory prompt shows up as its own step in a later prompt window.
+| Project | Select the project to use with this job template from the projects available to the logged-in user. | N/A
+| Source control branch | This field is only present if you chose a project that allows branch override.
+Specify the overriding branch to use in your job run.
+If left blank, the specified SCM branch (or commit hash or tag) from the project is used.
+
+For more information, see link:{URLControllerUserGuide}/controller-jobs#controller-job-branch-overriding[Job branch overriding]. | Yes
+| Execution Environment | Select the container image to be used to run this job.
+You must select a project before you can select an {ExecEnvShort}. | Yes.
+
+The execution environment prompt shows up as its own step in a later prompt window.
+| Playbook | Choose the playbook to be launched with this job template from the available playbooks.
+This field automatically populates with the names of the playbooks found in the project base path for the selected project.
+Alternatively, you can enter the name of the playbook if it is not listed, for example the name of a file (such as `foo.yml`) that you want to run.
+If you enter a filename that is not valid, the template displays an error or causes the job to fail. | N/A
+| Credentials | Select the image:examine.png[examine,15,15] icon to open a separate window.
+
+Choose the credential from the available options to use with this job template.
+
+Use the drop-down menu list to filter by credential type if the list is extensive.
+Some credential types are not listed because they do not apply to certain job templates. a|
+- If selected, when launching a job template that has a default credential, supplying another credential of the same type replaces the default credential.
+The following is an example of this message:
+
+`Job Template default credentials must be replaced
+with one of the same type. Please select a credential
+for the following types in order to proceed: Machine.`
+
+- You can add more credentials as you see fit.
+
+- The credential prompt shows up as its own step in a later prompt window.
+| Labels a| - Optionally supply labels that describe this job template, such as `dev` or `test`.
+
+- Use labels to group and filter job templates and completed jobs in the display.
+
+- Labels are created when they are added to the job template.
+Labels are associated with a single Organization by using the Project that is provided in the job template.
+Members of the Organization can create labels on a job template if they have edit permissions (such as the admin role).
+
+- Once you save the job template, the labels appear in the *Job Templates* overview in the Expanded view.
+
+- Select image:disassociate.png[Disassociate,10,10] beside a label to remove it.
+When a label is removed, it is no longer associated with that particular Job or Job Template, but it remains associated with any other jobs that reference it.
+
+- Jobs inherit labels from the Job Template at the time of launch.
+If you delete a label from a Job Template, it is also deleted from the Job. a| - If selected, even if a default value is supplied, you are prompted when launching to supply additional labels, if needed.
+- You cannot delete existing labels; selecting image:disassociate.png[Disassociate,10,10] only removes the newly added labels, not existing default labels.
+| Forks | The number of parallel or simultaneous processes to use while executing the playbook.
+A value of zero uses the Ansible default setting, which is five parallel processes unless overridden in `/etc/ansible/ansible.cfg`. | Yes
+| Limit a| A host pattern to further constrain the list of hosts managed or affected by the playbook. You can separate multiple patterns with colons (:).
+As with core Ansible:
+
+* a:b means "in group a or b"
+* a:b:&c means "in a or b but must be in c"
+* a:!b means "in a, and definitely not in b"
+
+For more information, see link:https://docs.ansible.com/ansible/latest/inventory_guide/intro_patterns.html[Patterns: targeting hosts and groups] in the Ansible documentation. | Yes
+
+If not selected, the job template executes against all nodes in the inventory or only the nodes predefined in the *Limit* field.
+When running as part of a workflow, the workflow job template limit is used instead.
+| Verbosity | Control the level of output Ansible produces as the playbook executes.
+Choose the verbosity from Normal to various Verbose or Debug settings.
+This only appears in the *Details* report view.
+Verbose logging includes the output of all commands.
+Debug logging is exceedingly verbose and includes information about SSH operations that can be useful in certain support instances.
+
+Verbosity `5` causes {ControllerName} to block heavily when jobs are running, which could delay reporting that the job has finished (even though it has) and can cause the browser tab to lock up.| Yes
+| Job Slicing | Specify the number of slices you want this job template to run.
+Each slice runs the same tasks against a part of the inventory.
+For more information about job slices, see link:{URLControllerUserGuide}/controller-job-slicing[Job Slicing]. | Yes
+| Timeout a| This enables you to specify the length of time (in seconds) that the job can run before it is canceled. Consider the following for setting the timeout value:
+
+- There is a global timeout defined in the settings, which defaults to 0, indicating no timeout.
+- A negative timeout (<0) on a job template is a true "no timeout" on the job.
+- A timeout of 0 on a job template defaults the job to the global timeout (which is no timeout by default).
+- A positive timeout sets the timeout for that job template. | Yes
+| Show Changes | Enables you to see the changes made by Ansible tasks. | Yes
+| Instance Groups | Choose link:{URLControllerUserGuide}/controller-instance-and-container-groups#controller-instance-group-policies[Instance and Container Groups] to associate with this job template.
+If the list is extensive, use the image:examine.png[examine,15,15] icon to narrow the options.
+Job template instance groups contribute to the job scheduling criteria; see link:{URLControllerUserGuide}/assembly-controller-topology-viewer#controller-job-runtime-behavior[Job Runtime Behavior] and link:{URLControllerAdminGuide}/controller-clustering#controller-cluster-job-runtime[Control where a job runs] for rules.
+A System Administrator must grant you or your team permissions to be able to use an instance group in a job template.
+Use of a container group requires admin rights. a| - Yes.
+
+If selected, you are providing the job's preferred instance groups in order of preference. If the first group is out of capacity, later groups in the list are considered until one with capacity is available, at which point that is selected to run the job.
+
+- If you prompt for an instance group, what you enter replaces the normal instance group hierarchy and overrides all of the organizations' and inventories' instance groups.
+
+- The Instance Groups prompt shows up as its own step in a later prompt window.
+| Job Tags | Type and select the *Create* menu to specify which parts of the playbook should be executed.
+For more information and examples, see link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_tags.html[Tags] in the Ansible documentation. | Yes
+| Skip Tags | Type and select the *Create* menu to specify certain tasks or parts of the playbook to skip.
+For more information and examples, see link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_tags.html[Tags] in the Ansible documentation. | Yes
+| Extra Variables a| - Pass extra command line variables to the playbook.
+This is the `-e` or `--extra-vars` command line parameter for ansible-playbook that is documented in the Ansible documentation at link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#defining-variables-at-runtime[Defining variables at runtime].
+- Provide key-value pairs by using either YAML or JSON.
+These variables have the highest precedence and override other variables specified elsewhere.
+The following is an example value:
+`git_branch: production
+release_version: 1.5` | Yes.
+
+If you want to be able to specify `extra_vars` on a schedule, you must select *Prompt on launch* for Variables on the job template, or enable a survey on the job template. Those answered survey questions become `extra_vars`.
+|===
++
+. You can set the following options for launching this template, if necessary:
+* *Privilege escalation*: If checked, you enable this playbook to run as an administrator.
+This is the equivalent of passing the `--become` option to the `ansible-playbook` command.
+* *Provisioning callback*: If checked, you enable a host to call back to {ControllerName} through the REST API and start a job from this job template.
+For more information, see link:{URLControllerUserGuide}/controller-job-templates#controller-provisioning-callbacks[Provisioning Callbacks].
+* *Enable webhook*: If checked, you turn on the ability to interface with a predefined SCM system web service that is used to launch a job template.
+GitHub and GitLab are the supported SCM systems.
+If you enable webhooks, other fields display, prompting for additional information:
++
+//image::ug-job-templates-options-webhooks.png[Job templates webhooks]
++
+** *Webhook service*: Select which service to listen for webhooks from.
+** *Webhook URL*: Automatically populated with the URL for the webhook service to POST requests to.
+** *Webhook key*: Generated shared secret to be used by the webhook service to sign payloads sent to {ControllerName}.
+You must configure this in the settings on the webhook service in order for {ControllerName} to accept webhooks from this service.
+** *Webhook credential*: Optionally, give a GitHub or GitLab personal access token (PAT) as a credential to use to send status updates back to the webhook service.
++
+Before you can select it, the credential must exist.
++
+See link:{URLControllerUserGuide}/controller-credentials#ref-controller-credential-types[Credential Types] to create one.
+** For additional information about setting up webhooks, see link:{URLControllerUserGuide}/controller-work-with-webhooks[Working with Webhooks].
+* *Concurrent jobs*: If checked, you are allowing jobs in the queue to run simultaneously if not dependent on one another. Check this box if you want to run job slices simultaneously. For more information, see link:{URLControllerUserGuide}/controller-jobs#controller-capacity-determination[{ControllerNameStart} capacity determination and job impact].
+* *Enable fact storage*: If checked, {ControllerName} stores gathered facts for all hosts in an inventory related to the job running.
+* *Prevent instance group fallback*: Check this option to allow only the instance groups listed in the *Instance Groups* field to run the job.
+If clear, all available instances in the execution pool are used based on the hierarchy described in link:{URLControllerAdminGuide}/controller-clustering#controller-cluster-job-runtime[Control where a job runs].
+. Click btn:[Create job template] when you have completed configuring the details of the job template.
+
+Creating the template does not exit the job template page but advances to the Job Template *Details* tab.
+After saving the template, you can click btn:[Launch template] to start the job.
+You can also click btn:[Edit] to add or change the attributes of the template, such as permissions, notifications, completed jobs, and surveys (if the job type is not a scan).
+You must first save the template before launching; otherwise, btn:[Launch template] remains disabled.
+
+//image::ug-job-template-details.png[Job template details]
+
+.Verification
+
+. From the navigation panel, select {MenuAETemplates}.
+. Verify that the newly created template appears on the *Templates* page.
diff --git a/downstream/modules/platform/proc-gs-auto-dev-run-template.adoc b/downstream/modules/platform/proc-gs-auto-dev-run-template.adoc
new file mode 100644
index 0000000000..a249c15553
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-auto-dev-run-template.adoc
@@ -0,0 +1,9 @@
+[id="proc-gs-auto-dev-run-template"]
+
+= Running a job template
+
+One benefit of {ControllerName} is the push-button deployment of Ansible playbooks.
+You can configure a template to store all the parameters that you would normally pass to the Ansible playbook on the command line.
+In addition to the playbooks, the template passes the inventory, credentials, extra variables, and all options and settings that you can specify on the command line.
+
+//ADD CONTENT
\ No newline at end of file
diff --git a/downstream/modules/platform/proc-gs-auto-dev-set-up-decision-env.adoc b/downstream/modules/platform/proc-gs-auto-dev-set-up-decision-env.adoc
new file mode 100644
index 0000000000..d1baf9b66c
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-auto-dev-set-up-decision-env.adoc
@@ -0,0 +1,29 @@
+[id="proc-gs-auto-dev-set-up-decision-env"]
+
+= Setting up a new decision environment
+
+The following steps describe how to import a decision environment into the platform.
+
+.Prerequisites
+
+* You have set up any necessary credentials.
+For more information, see the link:{URLEDAUserGuide}/eda-credentials#eda-set-up-credential[Setting up credentials] section of the {TitleEDAUserGuide} guide.
+* You have pushed a decision environment image to an image repository, or you choose to use the image `de-supported` provided at link:http://registry.redhat.io/[registry.redhat.io].
+
+.Procedure
+
+. Navigate to {MenuADDecisionEnvironments}.
+. Click btn:[Create decision environment].
+. Enter the following:
++
+Name:: Enter the name.
+Description:: This field is optional.
+Image:: This is the full image location, including the container registry, image name, and version tag.
+Credential:: This field is optional. This is the token needed to use the decision environment image.
++
+. Select btn:[Create decision environment].
+
+Your decision environment is now created and can be managed on the *Decision Environments* page.
+
+After saving the new decision environment, the decision environment's details page is displayed.
+From there or the *Decision Environments* list view, you can edit or delete it.
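+
+For example, a complete *Image* value follows the pattern of registry, image name, and version tag described above. The following path is illustrative for the supported decision environment image; verify the exact repository path for your {PlatformNameShort} version in the Red Hat Ecosystem Catalog:
+
+----
+registry.redhat.io/ansible-automation-platform-25/de-supported-rhel8:latest
+----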
diff --git a/downstream/modules/platform/proc-gs-auto-op-launch-template.adoc b/downstream/modules/platform/proc-gs-auto-op-launch-template.adoc
new file mode 100644
index 0000000000..40dccfc72f
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-auto-op-launch-template.adoc
@@ -0,0 +1,20 @@
+[id="proc-gs-auto-op-launch-template"]
+
+= Launching a job template
+
+{PlatformNameShort} offers push-button deployment of Ansible playbooks.
+You can configure a template to store all the parameters that you would normally pass to the Ansible playbook on the command line.
+In addition to the playbooks, the template passes the inventory, credentials, extra variables, and all options and settings that you can specify on the command line.
+
+.Procedure
+
+. From the navigation panel, select {MenuAETemplates}.
+. Select a template to view its details. A default job template is created during your initial setup to help you get started, but you can also create your own.
+. From the *Templates* page, click the launch icon to run your job template.
+
+The *Templates* list view shows job templates that are currently available. The default view is collapsed (Compact), showing the template name, template type, and the timestamp of the last job that ran using that template. You can click the arrow icon next to each entry to expand and view more information. This list is sorted alphabetically by name, but you can sort by other criteria, or search by various template fields and attributes.
+
+From this screen you can launch, edit, and copy a job template.
+
+For more information about templates, see the link:{URLControllerUserGuide}/controller-job-templates[Job Templates] and link:{URLControllerUserGuide}/controller-workflow-job-templates[Workflow job templates] sections of the {TitleControllerUserGuide} guide.
+
diff --git a/downstream/modules/platform/proc-gs-auto-op-projects.adoc b/downstream/modules/platform/proc-gs-auto-op-projects.adoc
new file mode 100644
index 0000000000..afa3de9a85
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-auto-op-projects.adoc
@@ -0,0 +1,16 @@
+[id="proc-gs-auto-op-projects"]
+
+= Automation execution projects
+
+A project is a logical collection of Ansible playbooks that you can manage in {PlatformNameShort}.
+
+Platform administrators and automation developers have permission to create projects.
+As an automation operator, you can view and sync projects.
+
+== Viewing project details
+
+The *Projects* page displays a list of projects that are currently available.
+
+. From the navigation panel, select {MenuAEProjects}.
+. Click a project to view its details.
+. For each project listed, you can sync the latest revision, edit the project, or copy the project's attributes using the icons next to each project.
\ No newline at end of file
diff --git a/downstream/modules/platform/proc-gs-auto-op-review-job-output.adoc b/downstream/modules/platform/proc-gs-auto-op-review-job-output.adoc
new file mode 100644
index 0000000000..c768194115
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-auto-op-review-job-output.adoc
@@ -0,0 +1,12 @@
+[id="proc-gs-auto-op-review-job-output"]
+
+= Reviewing job output
+
+When you relaunch a job, the job's *Output* view is displayed.
+
+.Procedure
+
+. From the navigation panel, select {MenuAEJobs}.
+. Select a job. This takes you to the *Output* view for that job, where you can filter job output by these criteria:
+* The *Search output* option allows you to search by keyword.
+* The *Event* option enables you to filter by the events of interest, such as errors, host failures, host retries, and items skipped. You can include as many events in the filter as necessary.
diff --git a/downstream/modules/platform/proc-gs-auto-op-review-job-status.adoc b/downstream/modules/platform/proc-gs-auto-op-review-job-status.adoc
new file mode 100644
index 0000000000..0e41a16e6c
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-auto-op-review-job-status.adoc
@@ -0,0 +1,17 @@
+[id="proc-gs-auto-op-review-job-status"]
+
+= Reviewing a job status
+
+The *Jobs* list view displays a list of jobs and their statuses, shown as completed successfully, failed, or as an active (running) job.
+
+.Procedure
+
+. From the navigation panel, select {MenuAEJobs}.
++
+The default view is collapsed (Compact) with the job name, status, job type, start, and finish times. You can click the arrow icon to expand and see more information. You can sort this list by various criteria, and perform a search to filter the jobs of interest.
+. From this screen, you can complete the following tasks:
+* View a job's details and standard output.
+* Relaunch jobs.
+* Remove selected jobs.
+
+The relaunch operation only applies to relaunches of playbook runs and does not apply to project or inventory updates, system jobs, or workflow jobs.
diff --git a/downstream/modules/platform/proc-gs-browse-content.adoc b/downstream/modules/platform/proc-gs-browse-content.adoc
new file mode 100644
index 0000000000..e55a2c080b
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-browse-content.adoc
@@ -0,0 +1,21 @@
+[id="con-gs-browse-content_{context}"]
+
+= Browse content
+
+{CertifiedName} are included in your subscription to {PlatformName}.
+Using {HubNameMain}, you can access and curate a unique set of collections from all forms of Ansible content.
+
+Red Hat provides two types of Ansible content:
+
+* {CertifiedName}
+* {Valid} collections
+
+{Valid} collections are available in your {PrivateHubName} through the Platform Installer.
+When you download {PlatformNameShort} with the bundled installer, validated content is pre-populated into the {PrivateHubName} by default,
+but only if you enable the {PrivateHubName} as part of the inventory.
+
+If you are not using the bundle installer, you can use a Red Hat-supplied Ansible Playbook to install validated content.
+For further information, see Ansible validated content.
+
+You can update validated collections manually by downloading their updated packages in {HubName}.
+
diff --git a/downstream/modules/platform/proc-gs-creating-a-role.adoc b/downstream/modules/platform/proc-gs-creating-a-role.adoc
new file mode 100644
index 0000000000..027b222ac4
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-creating-a-role.adoc
@@ -0,0 +1,52 @@
+:_newdoc-version: 2.18.3
+:_template-generated: 2024-09-19
+:_mod-docs-content-type: PROCEDURE
+
+[id="gs-creating-a-role_{context}"]
+= Creating a role
+
+.Procedure
+
+. In your terminal, navigate to the roles directory inside a collection.
+. Create a role called `my_role` inside the collection:
++
+[source,bash]
+----
+$ ansible-galaxy role init my_role
+----
++
+The collection now includes a role named `my_role` inside the `roles` directory, as you can see in this example:
++
+[source,bash]
+----
+~/.ansible/collections/ansible_collections//
+    ...
+    └── roles/
+        └── my_role/
+            ├── .travis.yml
+            ├── README.md
+            ├── defaults/
+            │   └── main.yml
+            ├── files/
+            ├── handlers/
+            │   └── main.yml
+            ├── meta/
+            │   └── main.yml
+            ├── tasks/
+            │   └── main.yml
+            ├── templates/
+            ├── tests/
+            │   ├── inventory
+            │   └── test.yml
+            └── vars/
+                └── main.yml
+----
++
+. A custom role skeleton directory can be supplied by using the `--role-skeleton` argument. This allows organizations to create standardized templates for new roles to suit their needs.
++
+[source,bash]
+----
+$ ansible-galaxy role init my_role --role-skeleton ~/role_skeleton
+----
++
+This creates a role named `my_role` by copying the contents of `~/role_skeleton` into `my_role`. The contents of `role_skeleton` can be any files or folders that are valid inside a role directory.
diff --git a/downstream/modules/platform/proc-gs-downloading-content.adoc b/downstream/modules/platform/proc-gs-downloading-content.adoc
new file mode 100644
index 0000000000..eedd80cfc5
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-downloading-content.adoc
@@ -0,0 +1,18 @@
+[id="gs-downloading-content_{context}"]
+
+= Downloading content
+
+After collections are finalized, you can import them to a location where they can be distributed to others across your organization.
+
+.Procedure
+
+. Log in to {PlatformName}.
+. From the navigation panel, select {MenuACCollections}.
+The *Collections* page displays all collections across all repositories.
+You can search for a specific collection.
+. Select the collection that you want to export.
+The collection details page opens.
+. From the *Install* tab, select *Download tarball*.
+The `.tar` file is downloaded to your default browser downloads folder.
+You can now import it to the location of your choosing.
+
diff --git a/downstream/modules/platform/proc-gs-eda-set-up-rulebook-activation.adoc b/downstream/modules/platform/proc-gs-eda-set-up-rulebook-activation.adoc
new file mode 100644
index 0000000000..c41f319f53
--- /dev/null
+++ b/downstream/modules/platform/proc-gs-eda-set-up-rulebook-activation.adoc
@@ -0,0 +1,62 @@
+:_newdoc-version: 2.18.3
+:_template-generated: 2024-09-24
+:_mod-docs-content-type: PROCEDURE
+
+[id="gs-eda-set-up-rulebook-activation_{context}"]
+= Setting up a rulebook activation
+
+.Prerequisites
+
+* You have set up a project.
+* You have set up a decision environment.
+
+.Procedure
+. From the navigation panel, select {MenuADRulebookActivations}.
+. Click btn:[Create rulebook activation].
+. Enter the following information:
+* *Name*: Enter the name.
+* *Description*: This field is optional.
+* *Organization*: This field is optional.
+* *Project*: This field is optional.
+* *Rulebook*: Rulebooks are displayed according to the project you selected.
+* *Credential*: Select zero or more credentials for this rulebook activation. This field is optional.
++
+[NOTE]
+====
+The credentials that display in this field are customized based on your rulebook activation and only include the following credential types: Vault, {PlatformName}, or any custom credential types that you have created. For more information about credentials, see link:{URLEDAUserGuide}/eda-credentials[Credentials] in the {TitleEDAUserGuide} guide.
+====
+//[J. Self] Might need to update the link above for the updated Credentials section.
+* *Decision environment*: A decision environment is a container image to run Ansible rulebooks.
++
+[NOTE]
+====
+In {EDAcontroller}, you cannot customize the pull policy of the decision environment.
+By default, it follows the behavior of the *always* policy. +Every time an activation is started, the system tries to pull the most recent version of the image. +==== ++ +* *Restart policy*: This is the policy that determines how an activation should restart after the container process running the source plugin ends. Select from the following options: +** *Always*: This restarts the rulebook activation immediately, regardless of whether it ends successfully or not, and occurs no more than 5 times. +** *Never*: This never restarts a rulebook activation when the container process ends. +** *On failure*: This restarts the rulebook activation after 60 seconds by default, only when the container process fails, and occurs no more than 5 times. +* *Log level*: This field defines the severity and type of content in your logged events. Select from one of the following options: +** *Error*: Logs that contain error messages that are displayed in the *History* tab of an activation. +** *Info*: Logs that contain useful information about rulebook activations, such as a success or failure, triggered action names and their related action events, and errors. +** *Debug*: Logs that contain information that is only useful during the debug phase and might be of little value during production. This log level includes both error and log level data. +* *Service name*: This defines a service name for Kubernetes to configure inbound connections if the activation exposes a port. This field is optional. +* *Rulebook activation enabled?*: Toggle to automatically enable the rulebook activation to run. +* *Variables*: The variables for the rulebook are in JSON or YAML format. The content is equivalent to a file passed through the `--vars` flag of the `ansible-rulebook` command. +* *Options*: Check the *Skip audit events* option if you do not want to see your events in the Rule Audit. +. Click btn:[Create rulebook activation]. + +Your rulebook activation is now created and can be managed on the *Rulebook Activations* page. + +After saving the new rulebook activation, the rulebook activation's details page is displayed, with either a *Pending*, *Running*, or *Failed* status. +From there or the *Rulebook Activations* list view, you can restart or delete it. + +[NOTE] +==== +Occasionally, when a source plugin shuts down, it causes a rulebook to exit gracefully after a certain amount of time. +When a rulebook activation shuts down, any tasks that are waiting to be performed are canceled, and an info-level message is sent to the activation log. +For more information, see link:https://ansible.readthedocs.io/projects/rulebook/en/stable/rulebooks.html#[Rulebooks]. +==== diff --git a/downstream/modules/platform/proc-gs-logging-in.adoc b/downstream/modules/platform/proc-gs-logging-in.adoc new file mode 100644 index 0000000000..67462257d4 --- /dev/null +++ b/downstream/modules/platform/proc-gs-logging-in.adoc @@ -0,0 +1,32 @@ +[id="proc-gs-logging-in"] + += Logging in for the first time + +Log in to {PlatformNameShort} as an administrator and enter your subscription information. +You can then create user profiles and assign roles. + +.Procedure + +. With the login information provided after your installation completed, open a web browser and log in to {PlatformName} by navigating to its server URL at: https:/// +. Use the credentials specified during the installation process to log in: +** The default username is *admin*. +** The password for *admin* is the value specified during installation.
+ +After your first login, you are prompted to add your subscription manifest. + +.Procedure + +. You can select between uploading a copy of your subscription manifest or entering your login credentials to find the subscription associated with your profile: +.. To upload a subscription manifest, drag the file to the field beneath *Red Hat subscription manifest* or browse for the file on your local machine. +.. To find your subscription, click the tab labeled *Username / password* and enter your credentials. +Your subscription appears in the list menu labeled *Subscription*. +Select your subscription. +. After you have added your subscription, click btn:[Next]. +. On the screen labeled *Analytics*, check the box if you want to share data with Red Hat and click btn:[Next]. +. Check the box indicating that you agree to the *End User License Agreement*. +. Review your information and click btn:[Finish]. + +[TIP] +==== +After logging in, review the quick starts section for useful guidance. +==== diff --git a/downstream/modules/platform/proc-gs-platform-admin-create-user.adoc b/downstream/modules/platform/proc-gs-platform-admin-create-user.adoc new file mode 100644 index 0000000000..46660e8ade --- /dev/null +++ b/downstream/modules/platform/proc-gs-platform-admin-create-user.adoc @@ -0,0 +1,33 @@ +[id="proc-gs-platform-admin-create-user"] + += Creating a user + +There are three types of users in {PlatformNameShort}: + +Normal user:: Normal users have read and write access limited to the resources (such as inventory, projects, and job templates) for which that user has been granted the appropriate roles and privileges. Normal users are the default type of user. +{PlatformNameShort} Administrator:: An administrator (also known as a Superuser) has full system administration privileges, with full read and write access over the entire installation. An administrator is typically responsible for managing all aspects of the installation and for delegating day-to-day work to other users. +{PlatformNameShort} Auditor:: Auditors have read-only capability for all objects within the environment. + +.Procedure +. From the navigation panel, select {MenuAMUsers}. +. Click btn:[Create user]. +. Enter the details about your new user in the fields on the *Create* user page. Fields marked with an asterisk (*) are required. +. Normal users are the default when no User type is specified. To define a user as an administrator or auditor, select a *User type* checkbox. ++ +[NOTE] +==== +If you are modifying your own password, log out and log back in again for it to take effect. +==== ++ +. Select the *Organization* to be assigned to this user. For information about creating a new organization, refer to xref:proc-controller-create-organization[Creating an organization]. +. Click btn:[Create user]. + +When the user is successfully created, the *User* dialog opens. From here, you can review and modify the user’s Teams, Roles, Tokens, and other membership details. + +[NOTE] +==== +If the user is not newly created, the details screen displays the last login activity of that user. +==== + +If you log in as yourself and view the details of your user profile, you can manage tokens from your user profile by selecting the *Tokens* tab. +// [ddacosta - Removing until OAuth and Applications content is completed.] For more information, see xref:proc-controller-apps-create-tokens[Adding a token].
diff --git a/downstream/modules/platform/proc-gs-publish-to-a-collection.adoc b/downstream/modules/platform/proc-gs-publish-to-a-collection.adoc new file mode 100644 index 0000000000..5a440c6a3d --- /dev/null +++ b/downstream/modules/platform/proc-gs-publish-to-a-collection.adoc @@ -0,0 +1,25 @@ +[id="proc-gs-publish-to-a-collection_{context}"] + += Publishing to a collection + +You can configure your projects to be uploaded to Git or to the source control manager of your choice. + +.Procedure + +. From the navigation panel, select {MenuAEProjects}. +. Locate or create the project that you want to publish to your source control manager. +. In the project *Details* tab, select *Edit project*. +. Select *Git* from the *Source Control Type* drop-down menu. +. Enter the appropriate details into the following fields: +.. *Source Control URL* - see an example in the tooltip. +.. Optional: *Source control branch/tag/commit*: Enter the SCM branch, tags, commit hashes, arbitrary refs, or revision number (if applicable) from the source control to check out. Some commit hashes and references might not be available unless you also provide a custom refspec in the next field. If left blank, the default is `HEAD`, which is the last checked out branch, tag, or commit for this project. +.. *Source Control Refspec* - This field is specific to Git source control. Only advanced users who are familiar and comfortable with Git should use it to specify which references to download from the remote repository (see the example after this procedure). For more information, see link:{URLControllerUserGuide}/controller-jobs#controller-job-branch-overriding[Job branch overriding]. +.. *Source Control Credential* - If authentication is required, select the appropriate source control credential. +. Optional: *Options* - select the launch behavior, if applicable: +.. *Clean* - Removes any local modifications before performing an update. +.. *Delete* - Deletes the local repository in its entirety before performing an update. Depending on the size of the repository, this can significantly increase the amount of time required to complete an update. +.. *Track submodules* - Tracks the latest commit. See the tooltip for more information. +.. *Update Revision on Launch* - Updates the revision of the project to the current revision in the remote source control, and caches the roles directory from link:https://docs.ansible.com/automation-controller/latest/html/userguide/projects.html#ug-galaxy[{Galaxy}] or link:{URLControllerUserGuide}/controller-projects#ref-projects-collections-support[Collections support]. {ControllerNameStart} ensures that the local revision matches and that the roles and collections are up-to-date with the last update. In addition, to avoid job overflows if jobs are spawned faster than the project can synchronize, selecting this enables you to configure a cache timeout to cache previous project synchronizations for a given number of seconds. +.. *Allow Branch Override* - Enables a job template or an inventory source that uses this project to start with a specified SCM branch or revision other than that of the project. For more information, see link:{URLControllerUserGuide}/controller-jobs#controller-job-branch-overriding[Job branch overriding]. +. Click btn:[Save] to save your project.
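+ +A Git refspec maps references in the remote repository to local references. As a hedged illustration only, and not part of the procedure above, the following shows an equivalent fetch expressed as a plain Git command; the pull request number is hypothetical: + +[source,bash] +---- +# Hypothetical example: fetch pull request 62 so that its commits become +# available for checkout. The PR number is illustrative only. +$ git fetch origin refs/pull/62/head:refs/remotes/origin/pull/62/head +----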
+ diff --git a/downstream/modules/platform/proc-gs-social-auth-github.adoc b/downstream/modules/platform/proc-gs-social-auth-github.adoc new file mode 100644 index 0000000000..57404fcf64 --- /dev/null +++ b/downstream/modules/platform/proc-gs-social-auth-github.adoc @@ -0,0 +1,31 @@ +[id="proc-gs-social-auth-github"] + += Configuring GitHub authentication + +You can connect GitHub identities to {PlatformNameShort} by using OAuth. To set up GitHub authentication, you must obtain an OAuth2 key and secret by registering your organization-owned application with GitHub. For instructions, see link:https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app[Installing your own GitHub App]. + +The OAuth2 key (Client ID) and secret (Client Secret) are used to supply the required fields in the UI. To register the application, you must supply it with your webpage URL, which is the Callback URL shown in the Authenticator details for your authenticator configuration. +//See xref:gw-display-auth-details[Displaying authenticator details] for instructions on accessing this information. + +.Procedure + +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *GitHub* from the *Authentication type* list and click btn:[Next]. +. Enter a *Name* for this authentication configuration. +. When the application is registered, GitHub displays the *Client ID* and *Client Secret*: ++ +.. Copy and paste the GitHub Client ID into the GitHub OAuth2 Key field. +.. Copy and paste the GitHub Client Secret into the GitHub OAuth2 Secret field. ++ +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] ++ +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +. Click btn:[Next]. + +include::snippets/snip-gw-authentication-verification.adoc[] + +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] diff --git a/downstream/modules/platform/proc-gs-upload-collection.adoc b/downstream/modules/platform/proc-gs-upload-collection.adoc new file mode 100644 index 0000000000..41528d4586 --- /dev/null +++ b/downstream/modules/platform/proc-gs-upload-collection.adoc @@ -0,0 +1,34 @@ +[id="proc-gs-upload-collection_{context}"] + += Uploading a collection to {HubName} + +If you want to share a collection that you have created with the rest of the Ansible community, you can upload it to {HubName}. + +[NOTE] +==== +Sharing a collection with the Ansible community requires getting the collection certified or validated by our Partner Engineering team. This action is available only to partner clients. For more about becoming a partner, see our link:https://connect.redhat.com/en/partner-resources/software-certification-documentation[documentation on software certification]. +==== + +You can upload your collection by using either the {HubName} user interface or the `ansible-galaxy` client. + +.Prerequisites + +* You have configured the `ansible-galaxy` client for {HubName}. +* You have at least one namespace. +* You have run all content through `ansible-test sanity`. + +.Procedure + +. From the navigation panel, select {MenuACNamespaces}. +. On the *My namespaces* tab, locate and click the namespace to which you want to upload a collection. +. Select the *Collections* tab, and then click btn:[Upload collection]. +. In the *New collection* modal, click *Select file*. Locate the file on your system. +. Click btn:[Upload].
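+ +The prerequisite `ansible-galaxy` client configuration points the client at your {HubName} instance. The following is a minimal sketch only; the server name `my_hub`, the URL, and the token value are placeholder assumptions, so substitute the values for your own instance: + +[source,ini] +---- +# Sketch of an ansible.cfg galaxy server entry; all values shown are placeholders. +[galaxy] +server_list = my_hub + +[galaxy_server.my_hub] +url=https://hub.example.com/api/galaxy/ +token=<your-api-token> +----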
+ +To upload the collection by using the `ansible-galaxy` client instead, enter the following command: + +[source,bash] +---- +$ ansible-galaxy collection publish path/to/my_namespace-my_collection-1.0.0.tar.gz --api-key=SECRET +---- diff --git a/downstream/modules/platform/proc-gs-use-base-execution-env.adoc b/downstream/modules/platform/proc-gs-use-base-execution-env.adoc new file mode 100644 index 0000000000..67fb768789 --- /dev/null +++ b/downstream/modules/platform/proc-gs-use-base-execution-env.adoc @@ -0,0 +1,50 @@ +[id="proc-gs-use-base-execution-env_{context}"] + += Using the base automation {ExecEnvShort} + +Your subscription with {PlatformName} gives you access to some base {ExecEnvName}. You can use a base {ExecEnvName} as a starting point for creating a customized {ExecEnvShort}. + +Base images included with {PlatformNameShort} are hosted on the Red Hat Ecosystem Catalog (registry.redhat.io). + +.Prerequisites + +* You have a valid {PlatformName} subscription. + +.Procedure + +. Log in to registry.redhat.io. ++ +[source,bash] +---- +$ podman login registry.redhat.io +---- ++ +. Pull the base images from the registry: ++ +[source,bash] +---- +$ podman pull registry.redhat.io/aap/ +---- + +== Customize the base execution environment image + +{PlatformNameShort} includes the following default execution environments: + +* `Minimal` - Includes the latest Ansible-core 2.15 release along with Ansible Runner, but does not include collections or other content +* `EE Supported` - Minimal, plus all Red Hat-supported collections and dependencies + +While these environments cover many automation use cases, you can add items to customize these containers for your specific needs. For more information about customizing your execution environment, see link:{URLBuilder}/assembly-publishing-exec-env#proc-customize-ee-image[Customizing an existing automation {ExecEnvShort} image] in the {TitleBuilder} guide. + +== About {Builder} + +You also have the option of creating an entirely new {ExecEnvShort} with {Builder}, also referred to as {ExecEnvShort} builder. +{Builder} is a command line tool you can use to create an {ExecEnvShort} for Ansible. +You can only create {ExecEnvShort}s with {Builder}. + +To build your own {ExecEnvShort}, you must: + +* Download {Builder} +* Create a definition file that defines your {ExecEnvShort} +* Create an {ExecEnvShort} image based on the definition file + +For more information about building an {ExecEnvShort}, see link:{LinkBuilder}. + \ No newline at end of file diff --git a/downstream/modules/platform/proc-gs-write-playbook.adoc b/downstream/modules/platform/proc-gs-write-playbook.adoc new file mode 100644 index 0000000000..3e8520e601 --- /dev/null +++ b/downstream/modules/platform/proc-gs-write-playbook.adoc @@ -0,0 +1,69 @@ +[id="proc-gs-write-playbook"] + += Writing a playbook + +Create a playbook that pings your hosts and prints a "Hello world" message. + +Ansible uses the YAML syntax. +YAML is a human-readable language that enables you to create playbooks without having to learn a complicated coding language. + +.Procedure + +. Create a file named `playbook.yaml` in your `ansible_quickstart` directory with the following content: ++ +[source,yaml] +---- +- name: My first play + hosts: myhosts + tasks: + - name: Ping my hosts + ansible.builtin.ping: + + - name: Print message + ansible.builtin.debug: + msg: Hello world +---- +.
Run your playbook: ++ +[source,bash] +---- +$ ansible-playbook -i inventory.ini playbook.yaml +---- + +Ansible returns the following output: +---- +PLAY [My first play] ******************************************************** + +TASK [Gathering Facts] ****************************************************** +ok: [192.0.2.50] +ok: [192.0.2.51] +ok: [192.0.2.52] + +TASK [Ping my hosts] ******************************************************** +ok: [192.0.2.50] +ok: [192.0.2.51] +ok: [192.0.2.52] + +TASK [Print message] ******************************************************** +ok: [192.0.2.50] => { + "msg": "Hello world" +} +ok: [192.0.2.51] => { + "msg": "Hello world" +} +ok: [192.0.2.52] => { + "msg": "Hello world" +} + +PLAY RECAP ****************************************************************** +192.0.2.50: ok=3 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +192.0.2.51: ok=3 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +192.0.2.52: ok=3 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + +---- + +.Additional resources + +* For more information about playbooks, see link:{LinkPlaybooksGettingStarted}. +* If you need help writing a playbook, see +link:https://developers.redhat.com/products/ansible/lightspeed?source=sso[{LightspeedFullName}]. + diff --git a/downstream/modules/platform/proc-gw-add-admin-organization.adoc b/downstream/modules/platform/proc-gw-add-admin-organization.adoc new file mode 100644 index 0000000000..682d482a78 --- /dev/null +++ b/downstream/modules/platform/proc-gw-add-admin-organization.adoc @@ -0,0 +1,23 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-add-admin-organization"] + += Adding an administrator to an organization + +You can add administrators to an organization, which allows them to manage the membership and settings of the organization. For example, they can create new users and teams within the organization and grant permissions to users within it. +To add an administrator to an organization, the user must already exist. + +.Procedure + +. From the navigation panel, select {MenuAMOrganizations}. +. From the Organizations list view, select the organization to which you want to add a user, administrator, or team. +. Click the *Administrators* tab. +. Click btn:[Add administrators]. +. Select the users from the list by clicking the checkbox next to the name to assign the administrator role to them for this organization. +. Click btn:[Add administrators]. +. To remove a particular administrator from the organization, select *Remove administrator* from the *More actions {MoreActionsIcon}* list next to the administrator name. This launches a confirmation dialog, asking you to confirm the removal. ++ +[NOTE] +==== +If the user was previously added as a member of this organization, they remain a member of the organization. However, if they were added to the organization only when the administrator assignment was made, they are removed from the organization. +==== diff --git a/downstream/modules/platform/proc-gw-add-admin-team.adoc b/downstream/modules/platform/proc-gw-add-admin-team.adoc new file mode 100644 index 0000000000..3388b657de --- /dev/null +++ b/downstream/modules/platform/proc-gw-add-admin-team.adoc @@ -0,0 +1,16 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-add-admin-team"] + += Adding administrators to a team + +You can add administrators to a team, which allows them to manage the membership and settings of that team.
For example, they can create new users and grant permissions to users within the team. +To add an administrator to a team, the administrator must already have been created. For more information, see xref:proc-controller-creating-a-user[Creating a user]. + +.Procedure + +. From the navigation panel, select {MenuAMTeams}. +. Select the team to which you want to add an administrator. +. Select the *Administrators* tab and click btn:[Add administrator(s)]. +. Select one or more users from the list by clicking the checkbox next to the name to add them as administrators of this team. +. Click btn:[Add administrators]. diff --git a/downstream/modules/platform/proc-gw-add-team-organization.adoc b/downstream/modules/platform/proc-gw-add-team-organization.adoc new file mode 100644 index 0000000000..9f1a3b5e43 --- /dev/null +++ b/downstream/modules/platform/proc-gw-add-team-organization.adoc @@ -0,0 +1,29 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-add-team-organization"] + += Adding a team to an organization + +You can provide team access to an organization by adding roles to the team. To add roles to a team, the team must already exist in the organization. For more information, see xref:proc-controller-creating-a-team[Creating a team]. +To add roles for a team, the role must already exist. See xref:proc-gw-create-roles[Creating a role] for more information. + +.Procedure + +. From the navigation panel, select {MenuAMOrganizations}. +. From the Organizations list view, select the organization to which you want to add team access. +. Click the *Teams* tab. If no teams exist, click btn:[Create team] to create a team and add it to this organization. +. Click btn:[Add roles]. +. Select the roles you want the selected team to have. Scroll down for a complete list of roles. ++ +include::snippets/snip-gw-roles-note-multiple-components.adoc[] ++ +. Click btn:[Next] to review the role settings. +. Click btn:[Finish] to apply the roles to the selected teams. The *Add roles* dialog displays the updated roles assigned for each team. +. Click btn:[Close]. ++ +[NOTE] +==== +A team with associated roles retains them if it is reassigned to another organization. +==== ++ +. To manage roles for teams in an organization, click the *{SettingsIcon}* icon next to the team and select *Manage roles*. diff --git a/downstream/modules/platform/proc-gw-adjust-mapping-order.adoc b/downstream/modules/platform/proc-gw-adjust-mapping-order.adoc new file mode 100644 index 0000000000..bb497128e3 --- /dev/null +++ b/downstream/modules/platform/proc-gw-adjust-mapping-order.adoc @@ -0,0 +1,22 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-adjust-mapping-order"] + += Adjusting the Mapping order + +If you have one or more authenticator maps defined, you can manage the order of the maps. When a user logs in, authenticator maps run in order, from the lowest order value to the highest. If one authenticator map determines that a user should be a member of a team, but a subsequent map determines that the user should not be a member of the same team, the ruling from the second map takes precedence over the result of the first map. Authenticator maps with the same order are executed in an undefined order. + +For example, if the first authenticator map is of type `is_superuser` and the trigger is set to *never*, any user logging into the system would never be granted the `is_superuser` flag.
+ +If the second map is of type `is_superuser` and the trigger is based on the user having a specific group, any user logging in is initially denied the `is_superuser` permission by the first map. However, any user with the specified group is subsequently granted the `is_superuser` permission by the second rule. + +.Procedure + +. Adjust the mapping order by dragging and dropping the mappings up or down in the list using the draggable icon. ++ +[NOTE] +==== +The mapping precedence is determined by the order in which the mappings are listed. +==== ++ +. After your authenticator maps are in the correct order, click btn:[Next] to xref:gw-review-auth-settings[Review the authentication settings]. diff --git a/downstream/modules/platform/proc-gw-allow-mapping.adoc b/downstream/modules/platform/proc-gw-allow-mapping.adoc new file mode 100644 index 0000000000..1dd3d9c226 --- /dev/null +++ b/downstream/modules/platform/proc-gw-allow-mapping.adoc @@ -0,0 +1,20 @@ +:_mod-docs-content-type: CONCEPT + +[id="gw-allow-mapping"] + += Allow mapping + +With allow mapping, you can control which users have access to the system by defining the conditions that must be met. + +.Procedure + +. After configuring the authentication details for your authentication method, select *Allow* from the *Add authentication mapping* list. +. Enter a unique rule *Name* to identify the rule. +. Select a *Trigger* from the list. See xref:gw-authenticator-map-triggers[Authenticator map triggers] for more information about map triggers. +. Select *Revoke* to deny user access to the system when none of the trigger conditions are matched. +. Click btn:[Next]. + +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-mapping-next-steps.adoc[] + diff --git a/downstream/modules/platform/proc-gw-authentication-list-view.adoc b/downstream/modules/platform/proc-gw-authentication-list-view.adoc new file mode 100644 index 0000000000..044614ea95 --- /dev/null +++ b/downstream/modules/platform/proc-gw-authentication-list-view.adoc @@ -0,0 +1,17 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-authentication-list-view"] + += Authentication list view + +On the *Authentication Methods* page, you can view and manage the configured authentication methods for your organization. + +.Procedure + +. From the navigation panel, select {MenuAMAuthentication}. ++ +The *Authentication Methods* page is displayed. ++ +. Click btn:[Create authentication] and follow the steps for creating an authentication method in xref:gw-config-authentication-type[Configuring an authentication type]. Otherwise, proceed to step 3. +. From the menu bar, you can sort the list of authentication methods by using the arrows in the menu bar for *Order*, *Name*, and *Authentication type*. +. Click the toggles to *Enable* or *Disable* authenticators. \ No newline at end of file diff --git a/downstream/modules/platform/proc-gw-config-keycloak-settings.adoc b/downstream/modules/platform/proc-gw-config-keycloak-settings.adoc new file mode 100644 index 0000000000..5c12ccb125 --- /dev/null +++ b/downstream/modules/platform/proc-gw-config-keycloak-settings.adoc @@ -0,0 +1,42 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-keycloak-authentication"] + += Configuring Keycloak authentication + +You can configure {PlatformNameShort} to integrate with Keycloak to manage user authentication. + +[NOTE] +==== +When using this authenticator, some specific setup in your Keycloak instance is required.
Refer to the link:https://python-social-auth.readthedocs.io/en/latest/backends/keycloak.html[Python Keycloak reference] for more details. +==== + +.Procedure + +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *Keycloak* from the *Authentication type* list and click btn:[Next]. +. Enter a *Name* for this Keycloak configuration. The configuration name is required, must be unique across all authenticators, and must not be longer than 512 characters. +. Enter the location where the user's token can be retrieved in the *Keycloak Access Token URL* field. +. Optional: Enter the redirect location the user is taken to during the login flow in the *Keycloak Provider URL* field. +. Enter the Client ID from your Keycloak installation in the *Keycloak OIDC Key* field. +. Enter the RS256 public key provided by your Keycloak realm in the *Keycloak Public Key* field. +. Enter the OIDC secret (Client Secret) from your Keycloak installation in the *Keycloak OIDC Secret* field. ++ +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] ++ +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +. Click btn:[Next]. + +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] + +.Troubleshooting +If you receive a `jwt.exceptions.InvalidAudienceError: Audience doesn't match` error, you must re-enable the audience by doing the following: + +. From the navigation for your Keycloak configuration, select menu:Client scopes[_YOUR-CLIENT-ID-dedicated_ > Add mapper > Audience]. +. Pick a name for the mapper. +. Select the *Client ID* corresponding to your client in `Included Client Audience`. + diff --git a/downstream/modules/platform/proc-gw-configure-auth-details.adoc b/downstream/modules/platform/proc-gw-configure-auth-details.adoc new file mode 100644 index 0000000000..d79b2e6d89 --- /dev/null +++ b/downstream/modules/platform/proc-gw-configure-auth-details.adoc @@ -0,0 +1,40 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-configure-auth-details"] + += Configuring authentication details + +Different authenticator plugins require different types of information. See the respective sections in xref:gw-config-authentication-type[Configuring an authentication type] for the required details. + +For all authentication types, you can enter a *Name*, *Additional Authenticator Fields*, and *Create Objects*. + +.Procedure + +. Enter a unique *Name* for the authenticator. The name is required, must be unique across all authenticators, and must not be longer than 512 characters. This becomes the unique identifier generated for the authenticator. ++ +[NOTE] +==== +Changing the name does not update the unique identifier of the authenticator. For example, if you create an authenticator with the name “My Authenticator” and later change it to “My LDAP Authenticator” you will not be able to create another authenticator with the name “My Authenticator” because the unique identifier is still in use. +==== ++ +. Use the *Additional Authenticator Fields* to send arbitrary data back to the libraries behind the authenticators. This is an advanced feature, and any values provided in this field are not validated. ++ +[NOTE] +==== +Values defined in this field override the dedicated fields provided in the UI.
+For example, if you enter a URL in a dedicated field on this page and then add a URL entry into the *Additional Authenticator Fields*, the URL defined in *Additional Authenticator Fields* overrides the definition in the dedicated field. +==== ++ +. Enable or disable *Enabled* to specify whether the authenticator should be enabled or disabled. If enabled, users can log in by using the authenticator. If disabled, users cannot log in by using the authenticator. +. Enable or disable *Create Objects* to specify whether the authenticator should create teams and organizations in the system when a user logs in. ++ +Enabled:: Teams and organizations defined in the authenticator maps are created, and users are added to them. +Disabled:: Organizations and teams defined in the authenticator maps are not created automatically in the system. However, if they already exist (for example, created by a superuser), users who trigger the maps are granted access to them. ++ +. Enable or disable *Remove Users*. If enabled, any access previously granted to a user is removed when they authenticate from this source. If disabled, permissions are added or removed from the user based only on the results of this authenticator's authenticator mappings. ++ +For example, assume a user has been granted the `is_superuser` permission in the system, and that user logs in through an authenticator whose maps express no opinion about whether the user should be a superuser. +If *Remove Users* is enabled, the `is_superuser` permission is removed at login. Because the authenticator maps express no opinion about restoring it, the user does not have the `is_superuser` permission after login. ++ +If *Remove Users* is disabled, the `is_superuser` permission _is not_ removed from the user. Because the authenticator maps again express no opinion, the user _retains_ the `is_superuser` permission after login. ++ +. Click btn:[Next] to xref:gw-define-rules-triggers[Define authentication mapping rules and triggers]. diff --git a/downstream/modules/platform/proc-gw-create-roles.adoc b/downstream/modules/platform/proc-gw-create-roles.adoc new file mode 100644 index 0000000000..4717952dd3 --- /dev/null +++ b/downstream/modules/platform/proc-gw-create-roles.adoc @@ -0,0 +1,20 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-create-roles"] + += Creating a role + +{PlatformNameShort} services provide a set of predefined roles with permissions sufficient for standard automation tasks. It is also possible to configure custom roles and assign one or more permission filters to them. Permission filters define the actions allowed for a specific resource type. + +.Procedure + +. From the navigation panel, select {MenuAMRoles}. +. Select a tab for the component resource for which you want to create custom roles. ++ +include::snippets/snip-gw-roles-note-multiple-components.adoc[] ++ +. Click btn:[Create role]. +. Provide a *Name* and optionally include a *Description* for the role. +. Select a *Content Type*. +. Select the *Permissions* you want assigned to this role. +. Click btn:[Create role] to create your new role.
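+ +If you prefer to inspect role definitions programmatically, you can list them through the platform API. The following is a hedged sketch only: the endpoint path, the hostname, and the token are assumptions, so verify the exact path in your installation's interactive API browser before relying on it: + +[source,bash] +---- +# Hedged sketch: list role definitions through the platform API. +# The endpoint path, hostname, and token are assumptions; verify them +# against your installation's API browser before use. +$ curl -s -H "Authorization: Bearer <your-api-token>" \ + https://gateway.example.com/api/gateway/v1/role_definitions/ +----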
diff --git a/downstream/modules/platform/proc-gw-define-rules-triggers.adoc b/downstream/modules/platform/proc-gw-define-rules-triggers.adoc new file mode 100644 index 0000000000..050a8c6aa6 --- /dev/null +++ b/downstream/modules/platform/proc-gw-define-rules-triggers.adoc @@ -0,0 +1,33 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-define-rules-triggers"] + += Defining authentication mapping rules and triggers + +Authentication map types can be used with any type of authenticator. Each map has a trigger that defines when the map should be evaluated as true. + +.Procedure + +. Click btn:[Add authentication mapping] to see the list of available map types and select the map type you want to create. See xref:gw-authenticator-map-types[Authenticator map types] for detailed descriptions of the different map types. Choices include: ++ +* xref:gw-allow-mapping[Allow] +* xref:ref-controller-organization-mapping[Organization] +* xref:ref-controller-team-mapping[Team] +* xref:gw-role-mapping[Role] +* xref:gw-superuser-mapping[Is Superuser] ++ +. Enter a unique rule *Name* to identify the rule. +. Select a *Trigger* from the list. See xref:gw-authenticator-map-triggers[Authenticator map triggers] for more details. Choices include: ++ +* *Always* +* *Never* +* *Group* +* *Attribute* ++ +. Repeat steps 1-3 to add additional triggers to the authenticator. +. Click btn:[Next] to optionally xref:gw-adjust-mapping-order[Adjust the Mapping order]. ++ +[NOTE] +==== +The mapping order setting is only available if there is more than one authenticator map defined. +==== diff --git a/downstream/modules/platform/proc-gw-delete-authenticator.adoc b/downstream/modules/platform/proc-gw-delete-authenticator.adoc new file mode 100644 index 0000000000..859cc93144 --- /dev/null +++ b/downstream/modules/platform/proc-gw-delete-authenticator.adoc @@ -0,0 +1,18 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-delete-authenticator"] + += Deleting an authenticator + +You can delete previously configured authenticators from the *Authentication* list view. + +.Procedure + +. From the navigation panel, select {MenuAMAuthentication}. +. In the list view, select the checkbox next to the authenticator you want to delete. +. Select *Delete authentication* from the *{MoreActionsIcon}* list. ++ +[NOTE] +==== +You can delete multiple authenticators by selecting the checkbox next to each authenticator you want to remove, and clicking *Delete selected authentication* from the *{MoreActionsIcon}* list on the menu bar. +==== \ No newline at end of file diff --git a/downstream/modules/platform/proc-gw-delete-organization.adoc b/downstream/modules/platform/proc-gw-delete-organization.adoc new file mode 100644 index 0000000000..9eeaeb89b9 --- /dev/null +++ b/downstream/modules/platform/proc-gw-delete-organization.adoc @@ -0,0 +1,22 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-delete-organization"] + += Deleting an organization + +To delete an organization, you must be an Organization administrator or System administrator. When you delete an organization, the organization and its teams, users, and resources are permanently removed from {PlatformNameShort}. + +[NOTE] +==== +When you attempt to delete items that are used by other resources, a message is displayed warning you that the deletion might impact other resources and prompts you to confirm the deletion. Some screens might contain items that are invalid or were previously deleted; these items fail to run. +==== + +.Procedure +.
From the navigation panel, select {MenuAMOrganizations}. +. Click the *{MoreActionsIcon}* icon next to the organization you want to remove and select *Delete organization*. +. Select the confirmation checkbox and click btn:[Delete organizations] to proceed with the deletion. Otherwise, click btn:[Cancel]. ++ +[NOTE] +==== +You can delete multiple organizations by selecting the checkbox next to each organization you want to remove, and selecting *Delete selected organizations* from the *More actions {MoreActionsIcon}* list on the menu bar. +==== diff --git a/downstream/modules/platform/proc-gw-delete-roles.adoc b/downstream/modules/platform/proc-gw-delete-roles.adoc new file mode 100644 index 0000000000..e67cde04ee --- /dev/null +++ b/downstream/modules/platform/proc-gw-delete-roles.adoc @@ -0,0 +1,17 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-delete-roles"] + += Deleting a role + +Built-in roles cannot be deleted; however, you can delete custom roles from the *Roles* list view. + +.Procedure + +. From the navigation panel, select {MenuAMRoles}. +. Select a tab for the component resource for which you want to delete custom roles. ++ +include::snippets/snip-gw-roles-note-multiple-components.adoc[] ++ +. Click the *More Actions* icon *{MoreActionsIcon}* next to the role you want to delete and select *Delete role*. +. To delete roles in bulk, select the roles you want to delete from the *Roles* list view, click the *More Actions* icon *{MoreActionsIcon}*, and select *Delete roles*. diff --git a/downstream/modules/platform/proc-gw-delete-team.adoc b/downstream/modules/platform/proc-gw-delete-team.adoc new file mode 100644 index 0000000000..42b74f96b5 --- /dev/null +++ b/downstream/modules/platform/proc-gw-delete-team.adoc @@ -0,0 +1,18 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-delete-team"] + += Deleting a team + +To delete a team, you must have team permissions. When you delete a team, the permissions that members inherited from that team are revoked. + +.Procedure + +. From the navigation panel, select {MenuAMTeams}. +. Select the checkbox for the team that you want to remove. +. Select the *{MoreActionsIcon}* icon and select *Delete team*. ++ +[NOTE] +==== +You can delete multiple teams by selecting the checkbox next to each team you want to remove, and selecting *Delete teams* from the *More actions {MoreActionsIcon}* list. +==== diff --git a/downstream/modules/platform/proc-gw-display-auth-details.adoc b/downstream/modules/platform/proc-gw-display-auth-details.adoc new file mode 100644 index 0000000000..44e24c5055 --- /dev/null +++ b/downstream/modules/platform/proc-gw-display-auth-details.adoc @@ -0,0 +1,16 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-display-auth-details"] + += Displaying authenticator details + +After you locate the authenticator you want to review, you can display the configuration details: + +.Procedure + +. From the navigation panel, select {MenuAMAuthentication}. +. In the list view, select the authenticator name displayed in the *Name* column. ++ +The authenticator *Details* page is displayed. ++ +. From the *Details* page, you can review the configuration settings applied to the authenticator.
diff --git a/downstream/modules/platform/proc-gw-edit-authenticator.adoc b/downstream/modules/platform/proc-gw-edit-authenticator.adoc new file mode 100644 index 0000000000..d52d7b0c4b --- /dev/null +++ b/downstream/modules/platform/proc-gw-edit-authenticator.adoc @@ -0,0 +1,18 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-edit-authenticator"] + += Editing an authenticator + +You can modify the settings of previously configured authenticators from the *Authentication* list view. + +.Procedure + +. From the navigation panel, select {MenuAMAuthentication}. +. In the list view, you can either: ++ +.. Select the btn:[Edit] image:leftpencil.png[Edit,15,15] icon next to the authenticator you want to modify, or +.. Select the authenticator name displayed in the *Name* column and click btn:[Edit authenticator] from the *Details* page. ++ +. Modify the authentication details or mapping configurations as required. +. Click btn:[Save]. \ No newline at end of file diff --git a/downstream/modules/platform/proc-gw-edit-roles.adoc b/downstream/modules/platform/proc-gw-edit-roles.adoc new file mode 100644 index 0000000000..192a43733d --- /dev/null +++ b/downstream/modules/platform/proc-gw-edit-roles.adoc @@ -0,0 +1,17 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-edit-roles"] + += Editing a role + +Built-in roles cannot be changed; however, you can modify custom roles from the *Roles* list view. The *Editable* column in the *Roles* list view indicates whether a role is _Built-in_ or _Editable_. + +.Procedure + +. From the navigation panel, select {MenuAMRoles}. +. Select a tab for the component resource for which you want to modify a custom role. ++ +include::snippets/snip-gw-roles-note-multiple-components.adoc[] ++ +. Click the *Edit role* icon image:leftpencil.png[Edit,15,15] next to the role you want to modify and change the role settings as needed. +. Click btn:[Save role] to save your changes. diff --git a/downstream/modules/platform/proc-gw-editing-a-user.adoc b/downstream/modules/platform/proc-gw-editing-a-user.adoc new file mode 100644 index 0000000000..527e591176 --- /dev/null +++ b/downstream/modules/platform/proc-gw-editing-a-user.adoc @@ -0,0 +1,39 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-editing-a-user"] + += Editing a user + +You can modify the properties of a user account after it is created. + +In upgrade scenarios, there might be pre-existing user accounts from {ControllerName} or {HubName} services. When editing these user accounts, the *User type* checkboxes indicate whether the account had one of the following service-level administrator privileges: + +Automation Execution Administrator:: A previously defined {ControllerName} administrator with full read and write privileges over automation execution resources only. +Automation Decisions Administrator:: A previously defined {EDAName} administrator with full read and write privileges over automation decision resources only. +Automation Content Administrator:: A previously defined {HubName} administrator with full read and write privileges over automation content resources only. + +Platform administrators can revoke or assign administrator permissions for the individual services and designate the user as either an *{PlatformNameShort} Administrator*, *{PlatformNameShort} Auditor*, or normal user. Assigning administrator privileges to all of the individual services automatically designates the user as an *{PlatformNameShort} Administrator*.
See xref:proc-controller-creating-a-user[Creating a user] for more information about user types. + +To see whether a user had service-level auditor privileges, you must refer to the API. + +[NOTE] +==== +Users previously designated as {ControllerName} or {HubName} administrators are labeled as *Normal* in the *User type* column in the xref:proc-gw-users-list-view[Users list view]. You can see whether these users have administrator privileges from the *Edit Users* page. +==== + +.Procedure + +. From the navigation panel, select {MenuAMUsers}. + +. Select the checkbox for the user that you want to modify. + +. Click the *Pencil* icon and select *Edit user*. + +. The *Edit* user page is displayed, where you can modify user details such as *Password*, *Email*, *User type*, and *Organization*. ++ +[NOTE] +==== +If the user account was migrated to {PlatformNameShort} 2.5 during the upgrade process and had administrator privileges for an individual service, additional *User type* checkboxes are available. You can use these checkboxes to revoke or add individual privileges or designate the user as a platform administrator, system auditor, or normal user. +==== ++ +. After your changes are complete, click *Save user*. \ No newline at end of file diff --git a/downstream/modules/platform/proc-gw-local-authentication.adoc b/downstream/modules/platform/proc-gw-local-authentication.adoc new file mode 100644 index 0000000000..7708c6feab --- /dev/null +++ b/downstream/modules/platform/proc-gw-local-authentication.adoc @@ -0,0 +1,29 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-local-authentication"] + += Configuring local authentication + +As a platform administrator, you can configure local system authentication. With local authentication, users and their passwords are checked against local system accounts. + +[NOTE] +==== +A local authenticator is automatically created by the {PlatformNameShort} installation process and is configured with the admin credentials specified in the inventory file before installation. After successful installation, you can log in to {PlatformNameShort} by using those credentials. +==== + +.Procedure + +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. +. Select *Local* from the *Authentication type* list and click btn:[Next]. +. Enter a *Name* for this Local configuration. The configuration name is required, must be unique across all authenticators, and must not be longer than 512 characters. ++ +include::snippets/snip-gw-authentication-additional-auth-fields.adoc[] ++ +include::snippets/snip-gw-authentication-common-checkboxes.adoc[] ++ +. Click btn:[Next]. + +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-authentication-next-steps.adoc[] \ No newline at end of file diff --git a/downstream/modules/platform/proc-gw-organizations-exec-env.adoc b/downstream/modules/platform/proc-gw-organizations-exec-env.adoc new file mode 100644 index 0000000000..757439bc63 --- /dev/null +++ b/downstream/modules/platform/proc-gw-organizations-exec-env.adoc @@ -0,0 +1,25 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-organizations-exec-env"] + += Working with {ExecEnvShort}s + +When {ControllerName} is enabled on the platform, you can review any {ExecEnvShort}s you have set up and manage their settings within the organization resource.
+ +For more information about execution environments, see link:{URLControllerUserGuide}/assembly-controller-execution-environments[Execution environments] in the _{TitleControllerUserGuide}_ guide. + + +.Procedure + +. From the navigation panel, select {MenuAMOrganizations}. +. From the Organizations list view, select the organization whose {ExecEnvShort}s you want to manage. +. Select the *Execution Environments* tab. +. If no {ExecEnvShort}s are available, click btn:[Create {ExecEnvShort}] to create one. Alternatively, you can create an {ExecEnvShort} from the navigation panel by selecting {MenuInfrastructureExecEnvironments}. +. Click btn:[Create {ExecEnvShort}]. ++ +[NOTE] +==== +After creating a new {ExecEnvShort}, return to {MenuAMOrganizations} and select the organization in which you created the {ExecEnvShort} to update the list on that tab. +==== ++ +. Select the {ExecEnvShort}s to use with your particular organization. diff --git a/downstream/modules/platform/proc-gw-remove-roles-team.adoc b/downstream/modules/platform/proc-gw-remove-roles-team.adoc new file mode 100644 index 0000000000..3eae34284e --- /dev/null +++ b/downstream/modules/platform/proc-gw-remove-roles-team.adoc @@ -0,0 +1,18 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-remove-roles-team"] + += Removing roles from a team + +You can remove roles from a team by selecting the *-* icon next to the resource. This launches a confirmation dialog, asking you to confirm the removal. + +.Procedure + +. From the navigation panel, select {MenuAMTeams}. +. Select the team *Name* from which you want to remove roles. +. Select the *Roles* tab. ++ +include::snippets/snip-gw-roles-note-multiple-components.adoc[] ++ +. Select the checkbox next to each resource you want to remove and click *Remove selected roles* from the *{MoreActionsIcon}* list on the menu bar. +. Select the checkbox to confirm removal of the selected roles and click *Remove role*. diff --git a/downstream/modules/platform/proc-gw-remove-roles-user.adoc b/downstream/modules/platform/proc-gw-remove-roles-user.adoc new file mode 100644 index 0000000000..f6e47671ce --- /dev/null +++ b/downstream/modules/platform/proc-gw-remove-roles-user.adoc @@ -0,0 +1,17 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-remove-roles-user"] + += Removing roles from a user + +You can remove roles from a user by selecting the *-* icon next to the resource. This launches a confirmation dialog, asking you to confirm the removal. + +.Procedure + +. From the navigation panel, select {MenuAMUsers}. +. Select the user *Name* from which you want to remove roles. +. Select the *Roles* tab. ++ +include::snippets/snip-gw-roles-note-multiple-components.adoc[] ++ +. Select the checkbox next to each resource you want to remove and click *Remove selected roles* from the *More actions {MoreActionsIcon}* list on the menu bar. +. Select the checkbox to confirm removal of the selected roles and click btn:[Remove role].
diff --git a/downstream/modules/platform/proc-gw-review-auth-settings.adoc b/downstream/modules/platform/proc-gw-review-auth-settings.adoc new file mode 100644 index 0000000000..b6118d2430 --- /dev/null +++ b/downstream/modules/platform/proc-gw-review-auth-settings.adoc @@ -0,0 +1,14 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-review-auth-settings"] + += Reviewing the authentication settings + +After you have defined the authentication details, configured the authentication maps, and specified the mapping order precedence, you can review and verify the settings, or modify them, before creating the authenticator. + +.Procedure + +. Review and verify the authentication settings. +. Click btn:[Finish] to create the authenticator. ++ +A notification is displayed if there are any issues with the authenticator or the map. If you encounter issues, click btn:[Back] or select a wizard section from the wizard menu to go back and add missing data or correct inaccurate data. \ No newline at end of file diff --git a/downstream/modules/platform/proc-gw-role-mapping.adoc b/downstream/modules/platform/proc-gw-role-mapping.adoc new file mode 100644 index 0000000000..16d98ff942 --- /dev/null +++ b/downstream/modules/platform/proc-gw-role-mapping.adoc @@ -0,0 +1,27 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-role-mapping"] + += Role mapping + +Role mapping is the mapping of a user either to a global role, such as Platform Auditor, or to a team or organization role. + +When a Team or Organization is specified together with the appropriate Role, the behavior is identical to Organization mapping or Team mapping. + +Role mapping can be specified separately for each account authentication. + +.Procedure + +. After configuring the authentication details for your authentication type, select *Role* from the *Add authentication mapping* list. +. Enter a unique rule *Name* to identify the rule. +. Select a *Trigger* from the list. See xref:gw-authenticator-map-triggers[Authenticator map triggers] for more information about map triggers. +. Select *Revoke* to remove the role for the user when none of the trigger conditions are matched. +. Select a *Role* to be applied or removed for matching users. +. Click btn:[Next]. + +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-mapping-next-steps.adoc[] + + + diff --git a/downstream/modules/platform/proc-gw-roles.adoc b/downstream/modules/platform/proc-gw-roles.adoc new file mode 100644 index 0000000000..8c2314b6d5 --- /dev/null +++ b/downstream/modules/platform/proc-gw-roles.adoc @@ -0,0 +1,17 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-roles"] + += Displaying roles + +You can display the roles assigned for component resources from the menu:Access Management[] menu. + +.Procedure + +. From the navigation panel, select {MenuAMRoles}. +. Select a tab for the component resource for which you want to display roles. ++ +include::snippets/snip-gw-roles-note-multiple-components.adoc[] ++ +. From the table header, you can sort the list of roles by using the arrows for *Name*, *Description*, *Created*, and *Editable*, or by making sort selections in the *Sort* list. +. You can filter the list of roles by selecting *Name* or *Editable* from the filter list and clicking the arrow.
diff --git a/downstream/modules/platform/proc-gw-searching-authenticator.adoc b/downstream/modules/platform/proc-gw-searching-authenticator.adoc new file mode 100644 index 0000000000..b3bd1e0f52 --- /dev/null +++ b/downstream/modules/platform/proc-gw-searching-authenticator.adoc @@ -0,0 +1,14 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-searching-authenticator"] + += Searching for an authenticator + +You can search for a previously configured authenticator from the *Authentication* list view. + +.Procedure + +. From the navigation panel, select {MenuAMAuthentication}. +. In the search bar, enter an appropriate keyword for the authentication method you want to search for and click the arrow icon. +. If you do not find what you are looking for, you can narrow your search. From the filter list, select *Name* or *Authentication type* depending on the search term you want to use. +. Scroll through the list of search results and select the authenticator you want to review. diff --git a/downstream/modules/platform/proc-gw-select-auth-type.adoc b/downstream/modules/platform/proc-gw-select-auth-type.adoc new file mode 100644 index 0000000000..ce88d59776 --- /dev/null +++ b/downstream/modules/platform/proc-gw-select-auth-type.adoc @@ -0,0 +1,18 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-select-auth-type"] + += Selecting an authentication type + +On the first screen of the wizard, you can select the type of authenticator plugin you want to configure. + +.Procedure + +. From the navigation panel, select {MenuAMAuthentication}. +. Click btn:[Create authentication]. ++ +The *Create Authentication* wizard is displayed, where you can follow the prompts to configure your preferred authentication method. ++ +. Select the authenticator type from the *Authentication type* list. See xref:gw-config-authentication-type[Configuring an authentication type] for the complete list of authentication plugins available. ++ +. Click btn:[Next] to xref:gw-configure-auth-details[Configure authentication details]. diff --git a/downstream/modules/platform/proc-gw-superuser-mapping.adoc b/downstream/modules/platform/proc-gw-superuser-mapping.adoc new file mode 100644 index 0000000000..d611c2d712 --- /dev/null +++ b/downstream/modules/platform/proc-gw-superuser-mapping.adoc @@ -0,0 +1,19 @@ +:_mod-docs-content-type: PROCEDURE + +[id="gw-superuser-mapping"] + += Superuser mapping + +Superuser mapping is the mapping of a user to the superuser role, such as System Administrator. + +.Procedure + +. After configuring the authentication details for your authentication type, select *Superuser* from the *Add authentication mapping* list. +. Enter a unique rule *Name* to identify the rule. +. Select a *Trigger* from the list. See xref:gw-authenticator-map-triggers[Authenticator map triggers] for more information about map triggers. +. Select *Revoke* to remove the superuser role from the user when none of the trigger conditions are matched. +. Click btn:[Next].
+ +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-mapping-next-steps.adoc[] \ No newline at end of file diff --git a/downstream/modules/platform/proc-gw-team-access-resources.adoc b/downstream/modules/platform/proc-gw-team-access-resources.adoc new file mode 100644 index 0000000000..9276e46dd9 --- /dev/null +++ b/downstream/modules/platform/proc-gw-team-access-resources.adoc @@ -0,0 +1,16 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-team-access"] + += Providing team access to a resource + +You can grant users access based on their team membership. When you add a user as a member of a team, they inherit access to the roles and resources defined for that team. + +.Procedure + +. From the navigation panel, select a resource to which you want to provide team access. For example, {MenuAETemplates}. +. Select the *Team Access* tab. +. Click btn:[Add roles]. +. Click the checkbox beside the team to assign that team to your chosen type of resource and click btn:[Next]. +. Select the roles you want applied to the team for the chosen resource and click btn:[Next]. +. Review the settings and click btn:[Finish]. The *Add roles* dialog is displayed, indicating whether the role assignments were successfully applied. +. You can remove resource access for a team by selecting the *Remove role* icon next to the team. This launches a confirmation dialog, asking you to confirm the removal. diff --git a/downstream/modules/platform/proc-gw-team-add-user.adoc b/downstream/modules/platform/proc-gw-team-add-user.adoc new file mode 100644 index 0000000000..8918eb6eee --- /dev/null +++ b/downstream/modules/platform/proc-gw-team-add-user.adoc @@ -0,0 +1,27 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-team-add-user"] + += Adding users to a team + +To add a user to a team, the user must already have been created. For more information, see xref:proc-controller-creating-a-user[Creating a user]. Adding a user to a team adds them as a member only. Use the *Roles* tab to assign a role for different resources to the selected team. + +The following tab selections are available when adding users to a team. When user accounts from {ControllerName} or {HubName} organizations have been migrated to {PlatformNameShort} 2.5 during the upgrade process, the *Automation Execution* and *Automation Content* tabs show content based on whether the users were added to those organizations prior to migration. + +{PlatformNameShort}:: Reflects all users added to the organization at the platform level. From this tab, you can add users as organization members and, optionally, assign specific organization-level roles. + +Automation Execution:: Reflects users that were added directly to the {ControllerName} organization prior to an upgrade and migration. From this tab, you can only view existing memberships in {ControllerName} and remove those memberships, but you cannot add new memberships. New organization memberships must be added through the platform. + +Automation Content:: Reflects users that were added directly to the {HubName} organization prior to an upgrade and migration. From this tab, you can only view existing memberships in {HubName} and remove those memberships, but you cannot add new memberships. + +New user memberships to a team must be added at the platform level. + + +.Procedure + +. From the navigation panel, select {MenuAMTeams}. +. Select the team to which you want to add users. +. Select the *Users* tab. +.
Select the *{PlatformNameShort}* tab and click btn:[Add users] to add user access to the team, or select the *Automation Execution* or *Automation Content* tab to view or remove user access from the team. +. Select one or more users from the list by clicking the checkbox next to the name to add them as members of this team. +. Click btn:[Add users]. + \ No newline at end of file diff --git a/downstream/modules/platform/proc-gw-team-list-view.adoc b/downstream/modules/platform/proc-gw-team-list-view.adoc new file mode 100644 index 0000000000..ecdf03b30f --- /dev/null +++ b/downstream/modules/platform/proc-gw-team-list-view.adoc @@ -0,0 +1,15 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-team-list-view"] + += Teams list view + +The Teams page displays the existing teams for your installation. From here, you can search for a specific team, filter the list of teams by team name or organization, or change the sort order for the list. + +.Procedure + +. From the navigation panel, select {MenuAMTeams}. +. In the *Search* bar, enter an appropriate keyword for the team you want to search for and click the arrow icon. +. From the menu bar, you can sort the list of teams by using the arrows for *Name* and *Organization* to toggle your sorting preference. +. You can view team details by clicking a team *Name* on the *Teams* page. +. You can view organization details by clicking the link in the *Organization* column. diff --git a/downstream/modules/platform/proc-gw-team-remove-user.adoc b/downstream/modules/platform/proc-gw-team-remove-user.adoc new file mode 100644 index 0000000000..19780da7c2 --- /dev/null +++ b/downstream/modules/platform/proc-gw-team-remove-user.adoc @@ -0,0 +1,22 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-team-remove-user"] + += Removing users from a team + +You can remove a user from a team from the Teams list view. + +.Procedure + +. From the navigation panel, select {MenuAMTeams}. +. Select the team from which you want to remove users. +. Select the *Users* tab. +. Click the *Remove user* icon next to the user you want to remove as a member of the team. +. You can delete multiple users by selecting the checkbox next to each user you want to remove, and selecting *Remove selected users* from the *More actions {MoreActionsIcon}* list. ++ +[NOTE] +==== +If the user is a Team administrator, you can remove their membership from the team on the *Administrators* tab. +==== ++ +This launches a confirmation dialog, asking you to confirm the removal. diff --git a/downstream/modules/platform/proc-gw-user-access-resources.adoc b/downstream/modules/platform/proc-gw-user-access-resources.adoc new file mode 100644 index 0000000000..14c75dd79e --- /dev/null +++ b/downstream/modules/platform/proc-gw-user-access-resources.adoc @@ -0,0 +1,17 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-user-access-resources"] + += Providing user access to a resource + +You can grant users access to resources through the roles to which they are assigned. + +.Procedure + +. From the navigation panel, select a resource to which you want to provide user access. For example, {MenuAETemplates}. +. Select the *User access* tab. +. Click btn:[Add roles]. +. Click the checkbox beside the user to assign that user to your chosen type of resource and click btn:[Next]. +. Select the roles you want applied to the user for the chosen resource and click btn:[Next]. +. Review the settings and click btn:[Finish].
The Add roles dialog is displayed, indicating whether the role assignments were successfully applied. +. You can remove resource access for a user by selecting the *Remove role* icon next to the user. This launches a confirmation dialog, asking you to confirm the removal. diff --git a/downstream/modules/platform/proc-gw-users-list-view.adoc b/downstream/modules/platform/proc-gw-users-list-view.adoc new file mode 100644 index 0000000000..b95c7cd926 --- /dev/null +++ b/downstream/modules/platform/proc-gw-users-list-view.adoc @@ -0,0 +1,16 @@ +:_mod-docs-content-type: PROCEDURE + +[id="proc-gw-users-list-view"] + += Users list view + +The *Users* page displays the existing users for your installation. From here, you can search for a specific user, filter the list of users, or change the sort order for the list. + +When user accounts have been migrated to {PlatformNameShort} 2.5 during the upgrade process, these accounts are also displayed in the *Users* list view. Users previously designated as {ControllerName} or {HubName} administrators are labeled as *Normal* in the *User type* column. You can see whether these users have administrator privileges by editing the account. See xref:gw-editing-a-user[Editing a user] for instructions. + +.Procedure + +. From the navigation panel, select {MenuAMUsers}. +. In the *Search* bar, enter an appropriate keyword for the user you want to search for and click the arrow icon. +. From the menu bar, you can sort the list of users by using the arrows for *Username*, *Email*, *First name*, *Last name*, or *Last login* to toggle your sorting preference. +. You can view user details by selecting a *Username* from the *Users* list view. diff --git a/downstream/modules/platform/proc-hs-eda-setup.adoc b/downstream/modules/platform/proc-hs-eda-setup.adoc new file mode 100644 index 0000000000..52583ff5f6 --- /dev/null +++ b/downstream/modules/platform/proc-hs-eda-setup.adoc @@ -0,0 +1,51 @@ +[id="proc-hs-eda-setup"] + += Setting up horizontal scaling for {EDAcontroller} + +[role=_abstract] + +To scale up (add nodes) or scale down (remove nodes), update the inventory file to add or remove nodes and rerun the installation program. + + +// Procedure for RPM installer +ifdef::aap-install[] +.Procedure +. Update the inventory to add two more worker nodes: ++ +----- +[automationedacontroller] + +3.88.116.111 routable_hostname=automationedacontroller-api.example.com eda_node_type=api + +3.88.116.112 routable_hostname=automationedacontroller-api.example.com eda_node_type=worker + +# two more worker nodes +3.88.116.113 routable_hostname=automationedacontroller-api.example.com eda_node_type=worker + +3.88.116.114 routable_hostname=automationedacontroller-api.example.com eda_node_type=worker +----- ++ +. Re-run the installer. +endif::aap-install[] + + +// Procedure for Containerized installer +ifdef::container-install[] +.Procedure +. Update the inventory to add two more worker nodes: ++ +----- +[automationeda] + +3.88.116.111 routable_hostname=automationeda-api.example.com eda_type=api + +3.88.116.112 routable_hostname=automationeda-api.example.com eda_type=worker + +# two more worker nodes +3.88.116.113 routable_hostname=automationeda-api.example.com eda_type=worker + +3.88.116.114 routable_hostname=automationeda-api.example.com eda_type=worker +----- ++ +. Re-run the installer.
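++
+For example, assuming your updated inventory file is named `inventory`, the rerun uses the same command as the initial containerized installation:
++
+-----
+ansible-playbook -i inventory ansible.containerized_installer.install
+-----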
+endif::container-install[] diff --git a/downstream/modules/platform/proc-hub-ingress-options.adoc b/downstream/modules/platform/proc-hub-ingress-options.adoc index 2c6d95cb32..c935d8ba89 100644 --- a/downstream/modules/platform/proc-hub-ingress-options.adoc +++ b/downstream/modules/platform/proc-hub-ingress-options.adoc @@ -1,11 +1,17 @@ [id="proc-hub-ingress-options_{context}"] -= Configuring the Ingress type for your {HubName} operator += Configuring the ingress type for your {HubName} operator -The {PlatformName} operator installation form allows you to further configure your {HubName} operator Ingress under *Advanced configuration*. +The {OperatorPlatformNameShort} installation form allows you to further configure your {HubName} operator ingress under *Advanced configuration*. .Procedure +. Log in to {OCP}. +. Navigate to menu:Operators[Installed Operators]. +. Select your {OperatorPlatformNameShort} deployment. +. Select the *Automation Hub* tab. +. For new instances, click btn:[Create AutomationHub]. +.. For existing instances, you can edit the YAML view by clicking the {MoreActionsIcon} icon and then btn:[Edit AutomationHub]. . Click btn:[Advanced Configuration]. . Under *Ingress type*, click the drop-down menu and select *Ingress*. . Under *Ingress annotations*, enter any annotations to add to the ingress. diff --git a/downstream/modules/platform/proc-hub-route-options.adoc b/downstream/modules/platform/proc-hub-route-options.adoc index d1d8220bbb..829e48d680 100644 --- a/downstream/modules/platform/proc-hub-route-options.adoc +++ b/downstream/modules/platform/proc-hub-route-options.adoc @@ -6,6 +6,12 @@ The {PlatformName} operator installation form allows you to further configure yo .Procedure +. Log in to {OCP}. +. Navigate to menu:Operators[Installed Operators]. +. Select your {OperatorPlatformNameShort} deployment. +. Select the *Automation Hub* tab. +. For new instances, click btn:[Create AutomationHub]. +.. For existing instances, you can edit the YAML view by clicking the {MoreActionsIcon} icon and then btn:[Edit AutomationHub]. . Click btn:[Advanced configuration]. . Under *Ingress type*, click the drop-down menu and select *Route*. . Under *Route DNS host*, enter a common host name that the route answers to. diff --git a/downstream/modules/platform/proc-install-aap-operator.adoc b/downstream/modules/platform/proc-install-aap-operator.adoc index 327bbba38a..1ea96def6e 100644 --- a/downstream/modules/platform/proc-install-aap-operator.adoc +++ b/downstream/modules/platform/proc-install-aap-operator.adoc @@ -3,14 +3,50 @@ .Procedure . Log in to {OCP}. . Navigate to menu:Operators[OperatorHub]. -. Search for the {PlatformName} operator and click btn:[Install]. +. Search for {PlatformNameShort} and click btn:[Install]. . Select an *Update Channel*: + -* *stable-2.x*: installs a namespace-scoped operator, which limits deployments of {HubName} and {ControllerName} instances to the namespace the operator is installed in. This is suitable for most cases. The stable-2.x channel does not require administrator privileges and utilizes fewer resources because it only monitors a single namespace. -* *stable-2.x-cluster-scoped*: deploys {HubName} and {ControllerName} across multiple namespaces in the cluster and requires administrator privileges for all namespaces in the cluster. +* *stable-2.x*: installs a namespace-scoped operator, which limits deployments of {HubName} and {ControllerName} instances to the namespace the operator is installed in. This is suitable for most cases.
+The stable-2.x channel does not require administrator privileges and utilizes fewer resources because it only monitors a single namespace. +* *stable-2.x-cluster-scoped*: installs the {OperatorPlatformNameShort} in a single namespace that manages {PlatformNameShort} custom resources and deployments in all namespaces. +The {OperatorPlatformNameShort} requires administrator privileges for all namespaces in the cluster. . Select *Installation Mode*, *Installed Namespace*, and *Approval Strategy*. . Click btn:[Install]. -The installation process will begin. When installation is complete, a modal will appear notifying you that the {PlatformName} operator is installed in the specified namespace. +The installation process begins. When installation finishes, a modal appears notifying you that the {OperatorPlatformNameShort} is installed in the specified namespace. -* Click btn:[View Operator] to view your newly installed {PlatformName} operator. +.Verification + +* Click btn:[View Operator] to view your newly installed {OperatorPlatformNameShort} and verify that the following operator custom resources are present: + +[cols="a,a,a,a"] +|=== +|{ControllerNameStart} |{HubNameStart} |{EDAName} (EDA) |{LightspeedShortName} + +| + +* Automation Controller +* Automation Controller Backup +* Automation Controller Restore +* Automation Controller Mesh Ingress + + +| + +* Automation Hub +* Automation Hub Backup +* Automation Hub Restore + + +| + +* EDA +* EDA Backup +* EDA Restore + + +| + +* Ansible Lightspeed + +|=== diff --git a/downstream/modules/platform/proc-install-cli-aap-operator.adoc b/downstream/modules/platform/proc-install-cli-aap-operator.adoc index 6b6272d03e..2eac1e348f 100644 --- a/downstream/modules/platform/proc-install-cli-aap-operator.adoc +++ b/downstream/modules/platform/proc-install-cli-aap-operator.adoc @@ -2,15 +2,21 @@ // assemblies/platform/assembly-installing-aap-operator-cli.adoc // titles/aap-operator-installation/ -[id="proc-install-cli-aap-operator{context}"] +[id="install-cli-aap-operator_{context}"] = Subscribing a namespace to an operator using the {OCPShort} CLI Use this procedure to subscribe a namespace to an operator. +[IMPORTANT] +==== +You can subscribe only a single instance of the {OperatorPlatformNameShort} in a single namespace. +Subscribing multiple instances in the same namespace can lead to improper operation for both operator instances. +==== + .Procedure -. Create a project for the operator +. Create a project for the operator. + ----- oc new-project ansible-automation-platform ----- @@ -43,46 +49,68 @@ metadata: name: ansible-automation-platform namespace: ansible-automation-platform spec: - channel: 'stable-2.4' + channel: 'stable-2.5' installPlanApproval: Automatic name: ansible-automation-platform-operator source: redhat-operators sourceNamespace: openshift-marketplace --- -apiVersion: automationcontroller.ansible.com/v1beta1 -kind: AutomationController -metadata: - name: example - namespace: ansible-automation-platform -spec: - replicas: 1 - ----- + This file creates a `Subscription` object called `_ansible-automation-platform_` that subscribes the `ansible-automation-platform` namespace to the `ansible-automation-platform-operator` operator. + -It then creates an `AutomationController` object called `_example_` in the `ansible-automation-platform` namespace.
-+ -To change the {ControllerName} name from `_example_`, edit the _name_ field in the `kind: AutomationController` section of [filename]`sub.yaml` and replace `__` with the name you want to use: -+ -[subs="+quotes"] ------ -apiVersion: automationcontroller.ansible.com/v1beta1 -kind: AutomationController -metadata: - name: ____ - namespace: ansible-automation-platform ------ . Run the [command]`*oc apply*` command to create the objects specified in the [filename]`sub.yaml` file: + ----- oc apply -f sub.yaml ----- ++ +. Verify that the CSV PHASE reports "Succeeded" before proceeding, by running the [command]`oc get csv -n ansible-automation-platform` command: ++ +----- +oc get csv -n ansible-automation-platform -To verify that the namespace has been successfully subscribed to the `ansible-automation-platform-operator` operator, run the [command]`*oc get subs*` command: +NAME DISPLAY VERSION REPLACES PHASE +aap-operator.v2.5.0-0.1728520175 Ansible Automation Platform 2.5.0+0.1728520175 aap-operator.v2.5.0-0.1727875185 Succeeded +----- ++ +. Create an `AnsibleAutomationPlatform` object called `_example_` in the `ansible-automation-platform` namespace. ++ +To change the name of the {PlatformNameShort} instance and its components from `_example_`, edit the _name_ field in the `metadata:` section and replace `example` with the name you want to use: ++ ----- -$ oc get subs -n ansible-automation-platform +oc apply -f - <<EOF +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: example + namespace: ansible-automation-platform +spec: + # Platform + image_pull_policy: IfNotPresent + # Components + controller: + disabled: false + eda: + disabled: false + hub: + disabled: false + ## uncomment if using file storage for Content pod + storage_type: file + file_storage_storage_class: <your-rwx-storage-class> + file_storage_size: 10Gi + + ## uncomment if using S3 storage for Content pod + # storage_type: S3 + # object_storage_s3_secret: example-galaxy-object-storage + + ## uncomment if using Azure storage for Content pod + # storage_type: azure + # object_storage_azure_secret: azure-secret-name + lightspeed: + disabled: true +EOF ----- For further information about subscribing namespaces to operators, see link:{BaseURL}/openshift_container_platform/{OCPLatest}/html/operators/user-tasks#olm-installing-operator-from-operatorhub-using-cli_olm-installing-operators-in-namespace[Installing from OperatorHub using the CLI] in the {OCP} _Operators_ guide. diff --git a/downstream/modules/platform/proc-install-ha-hub-selinux.adoc b/downstream/modules/platform/proc-install-ha-hub-selinux.adoc index 8889e456b3..29dd1d4ef6 100644 --- a/downstream/modules/platform/proc-install-ha-hub-selinux.adoc +++ b/downstream/modules/platform/proc-install-ha-hub-selinux.adoc @@ -11,6 +11,11 @@ You must add the context for `/var/lib/pulp` pulpcore_static and run the {Platfo .Prerequisites * You have already configured an NFS export on your server. ++ +[NOTE] +==== +The NFS share is hosted on an external server and is not part of the high availability {HubName} deployment. +==== .Procedure . Create a mount point at `/var/lib/pulp`: @@ -21,7 +26,7 @@ $ mkdir /var/lib/pulp/ . Open `/etc/fstab` using a text editor, then add the following values: + ---- -srv_rhel8:/data /var/lib/pulp nfs defaults,_netdev,nosharecache 0 0 +srv_rhel8:/data /var/lib/pulp nfs defaults,_netdev,nosharecache,context="system_u:object_r:var_lib_t:s0" 0 0 srv_rhel8:/data/pulpcore_static /var/lib/pulp/pulpcore_static nfs defaults,_netdev,nosharecache,context="system_u:object_r:httpd_sys_content_rw_t:s0" 0 0 ---- .
Run the reload systemd manager configuration command: diff --git a/downstream/modules/platform/proc-installing-ansible-core.adoc b/downstream/modules/platform/proc-installing-ansible-core.adoc index 23202ee3b9..e970b6d2d4 100644 --- a/downstream/modules/platform/proc-installing-ansible-core.adoc +++ b/downstream/modules/platform/proc-installing-ansible-core.adoc @@ -2,21 +2,23 @@ [id="installing-ansible-core_{context}"] -= Installing ansible-core - -[role="_abstract"] - - += Installing ansible-core on the RHEL host .Procedure - -. Install ansible-core and other tools: +. From your RHEL host, install `ansible-core`: + ---- -sudo dnf install -y ansible-core wget git rsync +sudo dnf install -y ansible-core ---- ++ +. Optionally, you can install additional utilities that can be useful for troubleshooting purposes, for example `wget`, `git`, `rsync`, and `vim`: ++ +---- +sudo dnf install -y wget git rsync vim +---- ++ +. Set a hostname that is a fully qualified domain name (FQDN): + ---- -sudo hostnamectl set-hostname your-FQDN-hostname +sudo hostnamectl set-hostname <FQDN> ---- diff --git a/downstream/modules/platform/proc-installing-containerized-aap.adoc b/downstream/modules/platform/proc-installing-containerized-aap.adoc index 0ababc924f..815f2cc2de 100644 --- a/downstream/modules/platform/proc-installing-containerized-aap.adoc +++ b/downstream/modules/platform/proc-installing-containerized-aap.adoc @@ -4,170 +4,17 @@ = Installing containerized {PlatformNameShort} -[role="_abstract"] - - -Installation of {PlatformNameShort} is controlled with inventory files. Inventory files define the hosts and containers used and created, variables for components, and other information needed to customize the installation. - -For convenience an example inventory file is provided, that you can copy and modify to quickly get started. - -[NOTE] -==== -There is no default database choice given in the inventory file. You must follow the instructions in the inventory file to make the appropriate choice between an internally provided postgres, or provide your own externally managed and supported database option. -==== - -Edit the inventory file by replacing the `< >` placeholders with your specific variables, and uncommenting any lines specific to your needs.
- +Use the following command to install containerized {PlatformNameShort}: ---- -# This is the AAP installer inventory file -# Please consult the docs if you're unsure what to add -# For all optional variables please consult the included README.md - -# This section is for your AAP Controller host(s) -# ------------------------------------------------- -[automationcontroller] -fqdn_of_your_rhel_host ansible_connection=local - -# This section is for your AAP Automation Hub host(s) -# ----------------------------------------------------- -[automationhub] -fqdn_of_your_rhel_host ansible_connection=local - -# This section is for your AAP EDA Controller host(s) -# ----------------------------------------------------- -[automationeda] -fqdn_of_your_rhel_host ansible_connection=local - -# This section is for your AAP Execution host(s) -# ------------------------------------------------ -#[execution_nodes] -#fqdn_of_your_rhel_host - -# This section is for the AAP database(s) -# ----------------------------------------- -# Uncomment the lines below and amend appropriately if you want AAP to install and manage the postgres databases -# Leave commented out if you intend to use your own external database and just set appropriate _pg_hosts vars -# see mandatory sections under each AAP component -#[database] -#fqdn_of_your_rhel_host ansible_connection=local - -[all:vars] - -# Common variables needed for installation -# ---------------------------------------- -postgresql_admin_username=postgres -postgresql_admin_password= -# If using the online (non-bundled) installer, you need to set RHN registry credentials -registry_username= -registry_password= -# If using the bundled installer, you need to alter defaults by using: -#bundle_install=true -# The bundle directory must include /bundle in the path -#bundle_dir= -# To add more decision environment images you need to set the de_extra_images variable -#de_extra_images=[{'name': 'Custom decision environment', 'image': '//:'}] -# To add more execution environment images you need to set the ee_extra_images variable -#ee_extra_images=[{'name': 'Custom execution environment', 'image': '//:'}] -# To use custom TLS CA certificate/key you need to set these variables -#ca_tls_cert= -#ca_tls_key= - -# AAP Database - optional -# -------------------------- -# To use custom TLS certificate/key you need to set these variables -#postgresql_tls_cert= -#postgresql_tls_key= - -# AAP Controller - mandatory -# -------------------------- -controller_admin_password= -controller_pg_host=fqdn_of_your_rhel_host -controller_pg_password= - -# AAP Controller - optional -# ------------------------- -# To use the postinstall feature you need to set these variables -#controller_postinstall=true -#controller_license_file= -#controller_postinstall_dir= -# When using config-as-code in a git repository -#controller_postinstall_repo_url= -#controller_postinstall_repo_ref=main -# To use custom TLS certificate/key you need to set these variables -#controller_tls_cert= -#controller_tls_key= - -# AAP Automation Hub - mandatory -# ------------------------------ -hub_admin_password= -hub_pg_host=fqdn_of_your_rhel_host -hub_pg_password= - -# AAP Automation Hub - optional -# ----------------------------- -# To use the postinstall feature you need to set these variables -#hub_postinstall=true -#hub_postinstall_dir= -# When using config-as-code in a git repository -#hub_postinstall_repo_url= -#hub_postinstall_repo_ref=main -# To customize the number of worker containers -#hub_workers=2 -# To use the 
collection signing feature you need to set these variables -#hub_collection_signing=true -#hub_collection_signing_key= -# To use the container signing feature you need to set these variables -#hub_container_signing=true -#hub_container_signing_key= -# To use custom TLS certificate/key you need to set these variables -#hub_tls_cert= -#hub_tls_key= - -# AAP EDA Controller - mandatory -# ------------------------------ -eda_admin_password= -eda_pg_host=fqdn_of_your_rhel_host -eda_pg_password= - -# AAP EDA Controller - optional -# ----------------------------- -# When using an external controller node unmanaged by the installer. -#controller_main_url=https://fqdn_of_your_rhel_host -# To customize the number of default/activation worker containers -#eda_workers=2 -#eda_activation_workers=2 -# To use custom TLS certificate/key you need to set these variables -#eda_tls_cert= -#eda_tls_key= - -# AAP Execution Nodes - optional -# ----------------------------- -#receptor_port=27199 -#receptor_protocol=tcp -# To use custom TLS certificate/key you need to set these variables -#receptor_tls_cert= -#receptor_tls_key= -# To use custom RSA key pair you need to set these variables -#receptor_signing_private_key= -#receptor_signing_public_key= +ansible-playbook -i <path_to_inventory> ansible.containerized_installer.install ---- -Use the following command to install containerized {PlatformNameShort}: - +For example: ---- ansible-playbook -i inventory ansible.containerized_installer.install ---- +* If your privilege escalation requires you to enter a password, append `-K` to the command. You are then prompted for the `BECOME` password. +* You can use increasing verbosity, up to 4 v's (`-vvvv`), to see the details of the installation process. However, it is important to note that this can significantly increase installation time, so it is recommended that you use it only as needed or requested by Red{nbsp}Hat support. -[NOTE] -==== - If your privilege escalation requires a password to be entered, append *-K* to the command line. You will then be prompted for the *BECOME* password. -==== - -You can use increasing verbosity, up to 4 v's (-vvvv) to see the details of the installation process. -[NOTE] -==== -This can significantly increase installation time, so it is recommended that you use it only as needed or requested by Red Hat support. -==== \ No newline at end of file diff --git a/downstream/modules/platform/proc-installing-hub-using-operator.adoc b/downstream/modules/platform/proc-installing-hub-using-operator.adoc index 12453aee41..5de9ad444d 100644 --- a/downstream/modules/platform/proc-installing-hub-using-operator.adoc +++ b/downstream/modules/platform/proc-installing-hub-using-operator.adoc @@ -1,14 +1,16 @@ [id="proc-installing-hub-using-operator_{context}"] -= Installing {HubName} using the Operator += Installing {HubName} using the {OperatorPlatformNameShort} -Use the following procedure to install {HubName} using the operator. +Use the following procedure to install {HubName} using the {OperatorPlatformNameShort}. .Procedure +. Log in to {OCP}. . Navigate to menu:Operators[Installed Operators]. -. Select the {PlatformNameShort}. -. Select the {HubNameStart} tab and click btn:[Create {HubNameStart}]. +. Select your {OperatorPlatformNameShort} deployment. +. Select the {HubNameStart} tab. +. Click btn:[Create {HubNameStart}]. . Select btn:[YAML view].
The YAML should be similar to: + diff --git a/downstream/modules/platform/proc-installing-the-aap-setup-bundle.adoc b/downstream/modules/platform/proc-installing-the-aap-setup-bundle.adoc index 141a89ce1b..ca954acdf7 100644 --- a/downstream/modules/platform/proc-installing-the-aap-setup-bundle.adoc +++ b/downstream/modules/platform/proc-installing-the-aap-setup-bundle.adoc @@ -5,6 +5,7 @@ = Downloading and installing the {PlatformNameShort} setup bundle [role="_abstract"] + Choose the setup bundle to download {PlatformNameShort} for disconnected installations. This bundle includes the RPM content for {PlatformNameShort} and the default {ExecEnvShort} images that will be uploaded to your {PrivateHubName} during the installation process. @@ -12,69 +13,17 @@ Choose the setup bundle to download {PlatformNameShort} for disconnected install . Download the {PlatformNameShort} setup bundle package by navigating to the link:{PlatformDownloadUrl}[{PlatformName} download] page and clicking btn:[Download Now] for the {PlatformNameShort} {PlatformVers} Setup Bundle. -. From {ControllerName}, untar the bundle: +. On the control node, untar the bundle: + ---- $ tar xvf \ - ansible-automation-platform-setup-bundle-2.4-1.tar.gz -$ cd ansible-automation-platform-setup-bundle-2.4-1 + ansible-automation-platform-setup-bundle-2.5-1.tar.gz +$ cd ansible-automation-platform-setup-bundle-2.5-1 ---- + -. Edit the inventory file to include the required options: - -.. automationcontroller group -.. automationhub group -.. admin_password -.. pg_password -.. automationhub_admin_password -.. automationhub_pg_host, automationhub_pg_port -.. automationhub_pg_password -+ -*Example Inventory file* -+ ----- -[automationcontroller] -automationcontroller.example.org ansible_connection=local - -[automationcontroller:vars] -peers=execution_nodes - -[automationhub] -automationhub.example.org - -[all:vars] -admin_password='password123' - -pg_database='awx' -pg_username='awx' -pg_password='dbpassword123' - -receptor_listener_port=27199 - -automationhub_admin_password='hubpassword123' - -automationhub_pg_host='automationcontroller.example.org' -automationhub_pg_port=5432 - -automationhub_pg_database='automationhub' -automationhub_pg_username='automationhub' -automationhub_pg_password='dbpassword123' -automationhub_pg_sslmode='prefer' ----- -+ -. Run the {PlatformNameShort} setup bundle executable as the root user: -+ ----- -$ sudo -i -# cd /path/to/ansible-automation-platform-setup-bundle-2.4-1 -# ./setup.sh ----- -+ -. When installation is complete, navigate to the Fully Qualified Domain Name (FQDN) for the {ControllerName} node that was specified in the installation inventory file. - -. Log in using the administrator credentials specified in the installation inventory file. +. Edit the inventory file to include variables based on your host names and desired password values. [NOTE] ==== -The inventory file must be kept intact after installation because it is used for backup, restore, and upgrade functions. Keep a backup copy in a secure location, given that the inventory file contains passwords. -==== \ No newline at end of file +See section xref:con-install-scenario-examples[3.2 Inventory file examples based on installation scenarios] for the example that best fits your scenario.
+==== diff --git a/downstream/modules/platform/proc-installing-the-ansible-builder-rpm.adoc b/downstream/modules/platform/proc-installing-the-ansible-builder-rpm.adoc index eae85c1fa0..12260dbb6f 100644 --- a/downstream/modules/platform/proc-installing-the-ansible-builder-rpm.adoc +++ b/downstream/modules/platform/proc-installing-the-ansible-builder-rpm.adoc @@ -8,7 +8,7 @@ [role="_abstract"] -On the RHEL system where custom {ExecEnvShort}s will be built, you will install the {Builder} RPM using a Satellite server that already exists in the environment. This method is preferred because the {ExecEnvShort} images can use any RHEL content from the pre-existing Satellite if required. +On the RHEL system where custom {ExecEnvShort}s will be built, you will install the {Builder} RPM by using a Satellite Server that already exists in the environment. This method is preferred because the {ExecEnvShort} images can use any RHEL content from the pre-existing Satellite if required. .Procedure @@ -17,11 +17,11 @@ On the RHEL system where custom {ExecEnvShort}s will be built, you will install .. Subscribe the RHEL system to a Satellite on the disconnected network. -.. Attach the {PlatformNameShort} subscription and enable the AAP repository. The repository name will either be `ansible-automation-platform-2.4-for-rhel-8-x86_64-rpms` or `ansible-automation-platform-2.4-for-rhel-9-x86_64-rpms` depending on the version of RHEL used on the underlying system. +.. Attach the {PlatformNameShort} subscription and enable the {PlatformNameShort} repository. The repository name is either `ansible-automation-platform-2.4-for-rhel-8-x86_64-rpms` or `ansible-automation-platform-2.4-for-rhel-9-x86_64-rpms` depending on the version of RHEL used on the underlying system. -.. Install the {Builder} RPM. The version of the {Builder} RPM must be 3.0.0 or later in order for the examples below to work properly. +.. Install the {Builder} RPM. The version of the {Builder} RPM must be 3.0.0 or later in order for the examples below to work properly. -. Install the {Builder} RPM from the {PlatformNameShort} setup bundle. Use this method if a Satellite server is not available on your disconnected network. +. Install the {Builder} RPM from the {PlatformNameShort} setup bundle. Use this method if a Satellite Server is not available on your disconnected network. .. Unarchive the {PlatformNameShort} setup bundle. diff --git a/downstream/modules/platform/proc-installing-the-ansible-platform-operator.adoc b/downstream/modules/platform/proc-installing-the-ansible-platform-operator.adoc index 72d014dadb..7538bb67d0 100644 --- a/downstream/modules/platform/proc-installing-the-ansible-platform-operator.adoc +++ b/downstream/modules/platform/proc-installing-the-ansible-platform-operator.adoc @@ -4,8 +4,10 @@ .Procedure -. Navigate to menu:Operator[Operator Hub] and search for the {PlatformNameShort} Operator. -. Select the {PlatformNameShort} Operator project. +. Log in to {OCP}. +. Navigate to menu:Operators[OperatorHub]. +. Search for the {OperatorPlatformNameShort}. +. Select the {OperatorPlatformNameShort} project. . Click on the Operator tile. . Click btn:[Install]. . Select a Project to install the Operator into.
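+
+.Verification
+As a quick check, assuming you installed the {OperatorPlatformNameShort} into the `ansible-automation-platform` project, you can confirm from the {OCPShort} CLI that the operator ClusterServiceVersion reports the `Succeeded` phase:
+
+----
+oc get csv -n ansible-automation-platform
+----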
diff --git a/downstream/modules/platform/proc-installing-with-internet.adoc b/downstream/modules/platform/proc-installing-with-internet.adoc index db3ae20e2b..a56c582b73 100644 --- a/downstream/modules/platform/proc-installing-with-internet.adoc +++ b/downstream/modules/platform/proc-installing-with-internet.adoc @@ -12,11 +12,25 @@ Choose the {PlatformName} installer if your {RHEL} environment is connected to t . Navigate to the link:{PlatformDownloadUrl}[{PlatformName} download] page. . Click btn:[Download Now] for the *Ansible Automation Platform Setup*. -. Extract the files: -+ +. Transfer the file to the target server using `scp` or `curl`: +.. Using `scp`: +... Run the following command, replacing `private_key.pem`, `user`, and `server_ip` with your appropriate values: +----- +$ scp -i private_key.pem aap-bundled-installer.tar.gz user@server_ip: +----- +.. Using `curl`: +... If the setup file URL is available, you can download it directly to the target server using `curl`. Replace `<file_url>` with the file URL: +----- +$ curl -O <file_url> +----- + +[NOTE] +==== +If the file needs to be extracted after downloading, run the following command: ----- -$ tar xvzf ansible-automation-platform-setup-<latest_version>.tar.gz +$ tar xvzf aap-bundled-installer.tar.gz ----- +==== .RPM install @@ -25,16 +39,18 @@ $ tar xvzf ansible-automation-platform-setup-.tar.gz v.{PlatformVers} for RHEL 8 for x86_64 + ---- -$ sudo dnf install --enablerepo=ansible-automation-platform-2.4-for-rhel-8-x86_64-rpms ansible-automation-platform-installer +$ sudo dnf install --enablerepo=ansible-automation-platform-2.5-for-rhel-8-x86_64-rpms ansible-automation-platform-installer ---- + -v.{PlatformVers} for RHEL 9 for x86-64 +v.{PlatformVers} for RHEL 9 for x86_64 + ---- -$ sudo dnf install --enablerepo=ansible-automation-platform-2.4-for-rhel-9-x86_64-rpms ansible-automation-platform-installer +$ sudo dnf install --enablerepo=ansible-automation-platform-2.5-for-rhel-9-x86_64-rpms ansible-automation-platform-installer ---- [NOTE] +==== `dnf install` enables the repo as the repo is disabled by default. +==== When you use the RPM installer, the files are placed under the `/opt/ansible-automation-platform/installer` directory. diff --git a/downstream/modules/platform/proc-inventory-file-setup-rpm.adoc b/downstream/modules/platform/proc-inventory-file-setup-rpm.adoc new file mode 100644 index 0000000000..298d8d7032 --- /dev/null +++ b/downstream/modules/platform/proc-inventory-file-setup-rpm.adoc @@ -0,0 +1,28 @@ +[id="inventory-file-setup-rpm"] + += Setting up the inventory file + +Before upgrading your {PlatformName} installation, edit the `inventory` file so that it matches your desired configuration. You can keep the same parameters from your existing {PlatformNameShort} deployment or you can modify the parameters to match any changes to your environment. + +.Procedure + +. Navigate to the installation program directory. ++ +*Bundled installer* ++ +---- +$ cd ansible-automation-platform-setup-bundle-<latest_version>-<build_number> +---- ++ +*Online installer* ++ +---- +$ cd ansible-automation-platform-setup-<latest_version> +---- ++ +. Update the `inventory` file to match your desired configuration. You can use the same inventory file from an existing {PlatformNameShort} installation if there are no changes to the environment. + +[NOTE] +==== +Provide a reachable IP address or fully qualified domain name (FQDN) for all hosts to ensure that users can synchronize and install content from {HubNameMain} from a different node. Do not use localhost.
If localhost is used, the upgrade will be stopped as part of preflight checks. +==== \ No newline at end of file diff --git a/downstream/modules/platform/proc-modifying-the-run-schedule.adoc b/downstream/modules/platform/proc-modifying-the-run-schedule.adoc new file mode 100644 index 0000000000..2dd04fd9d8 --- /dev/null +++ b/downstream/modules/platform/proc-modifying-the-run-schedule.adoc @@ -0,0 +1,55 @@ +:_newdoc-version: 2.18.3 +:_template-generated: 2024-07-15 +:_mod-docs-content-type: PROCEDURE + +[id="modifying-the-run-schedule_{context}"] += Modifying the run schedule + + +You can configure `metrics-utility` to run at specified times and intervals. Run frequency is expressed in cron syntax. See link:https://www.redhat.com/sysadmin/linux-cron-command[How to schedule jobs using the Linux ‘Cron’ utility] for more information. + +== On RHEL .Procedure + +. From the command line, run: ++ +[source,bash] +---- +crontab -e +---- ++ +. After the code editor has opened, update the `gather` and `build` parameters using cron syntax as shown below: ++ +[source,bash] +---- +*/2 * * * * metrics-utility gather_automation_controller_billing_data --ship --until=10m +*/5 * * * * metrics-utility build_report +---- ++ +. Save and close the file. + +== On {OCPShort} from the {PlatformNameShort} operator .Procedure + +. From the navigation panel, select menu:Workloads[Deployments]. +. On the next screen, select *automation-controller-operator-controller-manager*. +. Beneath the heading *Deployment Details*, click the down arrow button to change the number of pods to zero. This pauses the deployment so that you can update the run schedule. +. From the navigation panel, select *Installed Operators*. From the list of installed operators, select {PlatformNameShort}. +. On the next screen, select the {ControllerName} tab. +. From the list that appears, select your {ControllerName} instance. +. On the next screen, select the `YAML` tab. +. In the `YAML` file, find the following parameters and enter values specifying how often `metrics-utility` should gather data and how often it should produce a report: ++ +[source,yaml] +---- +metrics_utility_cronjob_gather_schedule: +metrics_utility_cronjob_report_schedule: +---- ++ +. Click btn:[Save]. +. From the navigation menu, select menu:Deployments[] and then select *automation-controller-operator-controller-manager*. +. Increase the number of pods to 1. +. To verify that you have changed the `metrics-utility` running schedule successfully, you can take one or both of the following steps: +.. Return to the `YAML` file and ensure that the parameters described above reflect the correct values. +.. From the navigation menu, select menu:Workloads[Cronjobs] and ensure that your cronjobs show the updated schedule. diff --git a/downstream/modules/platform/proc-operator-aap-faq.adoc b/downstream/modules/platform/proc-operator-aap-faq.adoc index 1501a95031..d17a3fe48b 100644 --- a/downstream/modules/platform/proc-operator-aap-faq.adoc +++ b/downstream/modules/platform/proc-operator-aap-faq.adoc @@ -1,8 +1,8 @@ [id="operator-aap-troubleshooting_{context}"] -= Frequently asked questions on platform gateway += Frequently asked questions on {Gateway} -If I delete my Ansible Automation Platform deployment will I still have access to Automation Controller?:: +If I delete my {PlatformNameShort} deployment will I still have access to {ControllerName}?:: No, {ControllerName}, {HubName}, and {EDAName} are nested within the deployment and are also deleted.
Something went wrong with my deployment but I'm not sure what. How can I find out?:: @@ -10,7 +10,7 @@ You can follow along in the command line while the operator is reconciling, this Alternatively you can click into the deployment instance to see the status conditions being updated as the deployment goes on. Is it still possible to view individual component logs?:: -When troubleshooting you should examine the *AnsibleAutomationPlatform* instance for the main logs and then each individual component (*EDA*, *AutomationHub*, *AutomationController*) for more specific information. +When troubleshooting you should examine the *{PlatformNameShort}* instance for the main logs and then each individual component (*EDA*, *AutomationHub*, *AutomationController*) for more specific information. Where can I view the condition of an instance?:: To display status conditions click into the instance, and look under the *Details* or *Events* tab. @@ -19,4 +19,7 @@ Alternatively, to display the status conditions you can run the get command: Can I track my migration in real time?:: To help track the status of the migration or to understand why migration might have failed you can look at the migration logs as they are running. Use the logs command: -`oc logs fresh-install-controller-migration-4.6.0-jwfm6 -f` \ No newline at end of file +`oc logs fresh-install-controller-migration-4.6.0-jwfm6 -f` + +I have configured SAML, but authentication fails with the error "Unable to complete social auth login". What can I do?:: +You must update your {PlatformNameShort} instance to include the `REDIRECT_IS_HTTPS` extra setting. See link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/installing_on_openshift_container_platform/index#proc-operator-enable-https-redirect[Enabling single sign-on (SSO) for {Gateway} on {OCPShort}] for help with this. \ No newline at end of file diff --git a/downstream/modules/platform/proc-operator-access-aap.adoc b/downstream/modules/platform/proc-operator-access-aap.adoc index 3ff066bb8c..6fe62588d4 100644 --- a/downstream/modules/platform/proc-operator-access-aap.adoc +++ b/downstream/modules/platform/proc-operator-access-aap.adoc @@ -1,16 +1,17 @@ [id="operator-access-aap_{context}"] -= Accessing the platform gateway -You should use the *AnsibleAutomationPlatform* instance as your default. += Accessing the {Gateway} +You should use the *{PlatformNameShort}* instance as your default. This instance links the {ControllerName}, {HubName}, and {EDAName} deployments to a single interface. .Procedure -To access your *AnsibleAutomationPlatform* instance: +To access your *{PlatformNameShort}* instance: +. Log in to {OCP}. . Navigate to menu:Networking[Routes]. -. Click the link under *Location* for *AnsibleAutomationPlatform*. -. This redirects you to the *AnsibleAutomationPlatform* login page. Enter your username in the *Username* field. +. Click the link under *Location* for *{PlatformNameShort}*. +. This redirects you to the {PlatformNameShort} login page. Enter "admin" as your username in the *Username* field. . For the password you need to: .. Go to menu:Workloads[Secrets]. .. Click btn:[<instance_name>-admin-password] and copy the password. @@ -20,12 +21,12 @@ To access your *AnsibleAutomationPlatform* instance: .. Click btn:[Subscription manifest] or btn:[Username/password]. .. Upload your manifest or enter your username and password. .. Select your subscription from the *Subscription* list. -.. Click btn:[Next]. -+ This redirects you to the *Analytics* page. ..
Click btn:[Next]. + +This redirects you to the *Analytics* page. . Click btn:[Next]. . Select the *I agree to the terms of the license agreement* checkbox. . Click btn:[Next]. -You now have access to the platform gateway user interface. -If you cannot access the {PlatformNameShort} see <> for help with troubleshooting and debugging. +You now have access to the {Gateway} user interface. +If you cannot access the {PlatformNameShort}, see xref:operator-aap-troubleshooting_{context}[Frequently asked questions on {Gateway}] for help with troubleshooting and debugging. diff --git a/downstream/modules/platform/proc-operator-create_crs.adoc b/downstream/modules/platform/proc-operator-create_crs.adoc new file mode 100644 index 0000000000..f230cbbe75 --- /dev/null +++ b/downstream/modules/platform/proc-operator-create_crs.adoc @@ -0,0 +1,56 @@ +[id="operator-create-crs_{context}"] + += Creating {PlatformNameShort} custom resources + +After upgrading to the latest version of {OperatorPlatformNameShort} on {OCPShort}, you can create an {PlatformNameShort} custom resource (CR) that specifies the names of your existing deployments in the same namespace. + +.Procedure +This example outlines the steps to deploy a new {EDAName} setup after upgrading to the latest version, with existing {ControllerName} and {HubName} deployments already in place. + + +The xref:appendix-operator-crs_performance-considerations[Appendix] contains more examples of {PlatformNameShort} CRs for different deployments. + +. Log in to {OCP}. +. Navigate to menu:Operators[Installed Operators]. +. Select your {OperatorPlatformNameShort} deployment. +. Select the *Details* tab. +. On the *{PlatformNameShort}* tile click btn:[Create instance]. +. From the *Create {PlatformNameShort}* page enter a name for your instance in the *Name* field. +. Click btn:[YAML view] and paste the following YAML (xref:appendix-operator-crs_performance-considerations[aap-existing-controller-and-hub-new-eda.yml]): ++ +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + controller: + name: existing-controller #obtain name from controller CR + disabled: false + + eda: + disabled: false + + hub: + name: existing-hub + disabled: false +---- +. Click btn:[Create]. + +[NOTE] +==== +You can override the operator’s default images for {ControllerName}, {HubName}, or platform-resource apps by specifying the preferred image in the YAML spec. +This enables upgrading a specific deployment, like a controller, without updating the operator. + +The recommended approach, however, is to upgrade the operator and use the default image values. +==== + +.Verification +Navigate to your {OperatorPlatformNameShort} deployment and click btn:[All instances] to verify whether all instances have deployed correctly. +You should see the *{PlatformNameShort}* instance and the deployed *AutomationController*, *EDA*, and *AutomationHub* instances here. + +Alternatively, you can verify whether all instances deployed correctly by running `oc get route` in the command line.
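+
+For example, a minimal check, assuming your instances were created in the `aap` namespace:
+
+----
+oc get route -n aap
+----
+
+A route is listed for each component that deployed correctly, so a missing entry is a quick hint that the corresponding instance has not finished deploying.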
diff --git a/downstream/modules/platform/proc-operator-deploy-central-config.adoc b/downstream/modules/platform/proc-operator-deploy-central-config.adoc index 57566a1e80..712ff3b69d 100644 --- a/downstream/modules/platform/proc-operator-deploy-central-config.adoc +++ b/downstream/modules/platform/proc-operator-deploy-central-config.adoc @@ -1,56 +1,68 @@ [id="operator-deploy-central-config_{context}"] -= Deploying the platform gateway with existing {PlatformNameShort} components -You can link any components of the {PlatformNameShort}, that you have already installed to a new *AnsibleAutomationPlatform* instance. += Deploying the {Gateway} with existing {PlatformNameShort} components +You can link any components of {PlatformNameShort} that you have already installed to a new *{PlatformNameShort}* instance. The following procedure simulates a scenario where you have {ControllerName} as an existing component and want to add {HubName} and {EDAName}. .Procedure . Log in to {OCP}. -. Go to to menu:Operators[Installed Operators]. +. Navigate to menu:Operators[Installed Operators]. +. Select your {OperatorPlatformNameShort} deployment. . Click btn:[Subscriptions] and edit your *Update channel* to *stable-2.5*. -. Click btn:[Details] and on the *AnsibleAutomationPlatform* tile click btn:[Create instance]. -. From the *Create AnsibleAutomationPlatform* page enter a name for your instance in the *Name* field. +. Click btn:[Details] and on the *{PlatformNameShort}* tile click btn:[Create instance]. +. From the *Create {PlatformNameShort}* page enter a name for your instance in the *Name* field. +* When deploying an {PlatformNameShort} instance, ensure that `auto_update` is set to the default value of `false` on your existing {ControllerName} instance in order for the integration to work. + . Click btn:[YAML view] and copy in the following: + ---- -yaml apiVersion: aap.ansible.com/v1alpha1 - kind: AnsibleAutomationPlatform - metadata: - name: example-aap - namespace: aap - spec: - # Platform - image_pull_policy: IfNotPresent - # Components - controller: - disabled: false - name: existing-controller-name - eda: - disabled: false - hub: - disabled: false - ## uncomment if using file storage for Content pod - storage_type: file - file_storage_storage_class: your-rwx-storage-class - file_storage_size: 10Gi - - ## uncomment if using S3 storage for Content pod - # storage_type: S3 - # object_storage_s3_secret: example-galaxy-object-storage - - ## uncomment if using Azure storage for Content pod - # storage_type: azure - # object_storage_azure_secret: azure-secret-name - lightspeed: - disabled: true +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: example-aap + namespace: aap +spec: + database: + resource_requirements: + requests: + cpu: 200m + memory: 512Mi + storage_requirements: + requests: + storage: 100Gi + + # Platform + image_pull_policy: IfNotPresent + + # Components + controller: + disabled: false + name: existing-controller-name + eda: + disabled: false + hub: + disabled: false + ## uncomment if using file storage for Content pod + storage_type: file + file_storage_storage_class: <your-rwx-storage-class> + file_storage_size: 10Gi + + ## uncomment if using S3 storage for Content pod + # storage_type: S3 + # object_storage_s3_secret: example-galaxy-object-storage + + ## uncomment if using Azure storage for Content pod + # storage_type: azure + # object_storage_azure_secret: azure-secret-name + ---- -.. For new components, if you do not specify a name, a default name generates. +.. For new components, if you do not specify a name, a default name is generated. . Click btn:[Create]. -.
To access your new instance, see <>. +. To access your new instance, see xref:operator-access-aap_{context}[Accessing the {Gateway}]. -NOTE: If you have an existing controller with a managed Postgres pod, after creating the *AnsibleAutomationPlatform* resource your {ControllerName} instance will continue to use that original Postgres pod. If you were to do a fresh install you would have a single Postgres managed pod for all instances. +[NOTE] +==== +If you have an existing controller with a managed Postgres pod, after creating the *{PlatformNameShort}* resource your {ControllerName} instance will continue to use that original Postgres pod. If you were to do a fresh install you would have a single Postgres managed pod for all instances. +==== diff --git a/downstream/modules/platform/proc-operator-deploy-redis.adoc b/downstream/modules/platform/proc-operator-deploy-redis.adoc new file mode 100644 index 0000000000..6867b804dc --- /dev/null +++ b/downstream/modules/platform/proc-operator-deploy-redis.adoc @@ -0,0 +1,27 @@ +[id="operator-deploy-redis"] + += Deploying clustered Redis on {OperatorPlatformName} + +When you create an {PlatformNameShort} instance through the {OperatorPlatformNameShort}, standalone Redis is assigned by default. +To deploy clustered Redis, use the following procedure. + +//Add a link to the section when ready +For more information about Redis, refer to Caching and queueing system in the _Planning your installation_ guide. + +.Prerequisites +* You have installed an {OperatorPlatformNameShort} deployment. + +.Procedure +. Log in to {OCP}. +. Navigate to menu:Operators[Installed Operators]. +. Select your {OperatorPlatformNameShort} deployment. +. Select the *Details* tab. +. On the *{PlatformNameShort}* tile click btn:[Create instance]. +.. For existing instances, you can edit the YAML view by clicking the {MoreActionsIcon} icon and then btn:[Edit AnsibleAutomationPlatform]. +.. Change the *redis_mode* value to "cluster". +.. Click btn:[Reload], then btn:[Save]. +. Click to expand *Advanced configuration*. +. For the *Redis Mode* list, select *Cluster*. +. Configure the rest of your instance as necessary, then click btn:[Create]. + +Your instance deploys with clustered Redis and 6 Redis replicas by default. \ No newline at end of file diff --git a/downstream/modules/platform/proc-operator-enable-https-redirect.adoc b/downstream/modules/platform/proc-operator-enable-https-redirect.adoc new file mode 100644 index 0000000000..357ff1b1c0 --- /dev/null +++ b/downstream/modules/platform/proc-operator-enable-https-redirect.adoc @@ -0,0 +1,35 @@ +[id="proc-operator-enable-https-redirect"] + += Enabling HTTPS redirect for single sign-on (SSO) for {Gateway} on {OCPShort} + +HTTPS redirect for SAML allows you to log in once and access all of the {Gateway} without needing to reauthenticate. + +.Prerequisites + +* You have successfully configured SAML in the gateway from the {OperatorPlatformNameShort}. Refer to link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/access_management_and_authentication/index#controller-set-up-SAML[Configuring SAML authentication] for help with this. + +.Procedure + +. Log in to {OCP}. +. Go to menu:Operators[Installed Operators]. +. Select your {OperatorPlatformNameShort} deployment. +. Select *All Instances* and go to your *AnsibleAutomationPlatform* instance. +. Click the {MoreActionsIcon} icon and then select btn:[Edit AnsibleAutomationPlatform]. +.
In the *YAML view*, paste the following YAML code under the `spec:` section: ++ +---- +spec: + extra_settings: + - setting: REDIRECT_IS_HTTPS + value: '"True"' + +---- ++ +. Click btn:[Save]. + +.Verification + +After you have added the `REDIRECT_IS_HTTPS` setting, wait for the pod to redeploy automatically. You can verify that this setting is applied to the pod by running: +---- +oc exec -it <gateway_pod_name> -- grep REDIRECT /etc/ansible-automation-platform/gateway/settings.py +---- \ No newline at end of file diff --git a/downstream/modules/platform/proc-operator-external-db-controller.adoc b/downstream/modules/platform/proc-operator-external-db-controller.adoc index ed32371ed8..b9a74cfe1b 100644 --- a/downstream/modules/platform/proc-operator-external-db-controller.adoc +++ b/downstream/modules/platform/proc-operator-external-db-controller.adoc @@ -1,35 +1,35 @@ [id="proc-operator-external-db-controller"] -= Configuring an external database for {ControllerName} on {PlatformName} operator += Configuring an external database for {ControllerName} on {OperatorPlatformName} [role="_abstract"] Users who prefer to deploy {PlatformNameShort} with an external database can do so by configuring a secret with instance credentials and connection information, then applying it to their cluster using the `oc create` command. -By default, the {PlatformName} operator automatically creates and configures a managed PostgreSQL pod in the same namespace as your {PlatformNameShort} deployment. You can deploy {PlatformNameShort} with an external database instead of the managed PostgreSQL pod that the {PlatformName} operator automatically creates. +By default, the {OperatorPlatformNameShort} automatically creates and configures a managed PostgreSQL pod in the same namespace as your {PlatformNameShort} deployment. You can deploy {PlatformNameShort} with an external database instead of the managed PostgreSQL pod that the {OperatorPlatformNameShort} automatically creates. Using an external database lets you share and reuse resources and manually manage backups, upgrades, and performance optimizations. [NOTE] ==== -The same external database (PostgreSQL instance) can be used for both {HubName} and {ControllerName} as long as the database names are different. In other words, you can have multiple databases with different names inside a single PostgreSQL instance. +The same external database (PostgreSQL instance) can be used for {HubName}, {ControllerName}, and {Gateway} as long as the database names are different. In other words, you can have multiple databases with different names inside a single PostgreSQL instance. ==== -The following section outlines the steps to configure an external database for your {ControllerName} on a {PlatformNameShort} operator. +The following section outlines the steps to configure an external database for your {ControllerName} on the {OperatorPlatformNameShort}. .Prerequisite The external database must be a PostgreSQL database that is the version supported by the current release of {PlatformNameShort}. [NOTE] ==== -{PlatformNameShort} {PlatformVers} supports PostgreSQL 13. +{PlatformNameShort} {PlatformVers} supports {PostgresVers}. ==== .Procedure The external postgres instance credentials and connection information must be stored in a secret, which is then set on the {ControllerName} spec. -. Create a `postgres_configuration_secret` .yaml file, following the template below: +.
Create a `postgres_configuration_secret` YAML file, following the template below: + ---- apiVersion: v1 @@ -47,7 +47,7 @@ stringData: type: "unmanaged" type: Opaque ---- -<1> Namespace to create the secret in. This should be the same namespace you wish to deploy to. +<1> Namespace to create the secret in. This should be the same namespace you want to deploy to. <2> The resolvable hostname for your database node. <3> External port defaults to `5432`. <4> Value for variable `password` should not contain single or double quotes (', ") or backslashes (\) to avoid any issues during deployment, backup or restoration. diff --git a/downstream/modules/platform/proc-operator-external-db-gateway.adoc b/downstream/modules/platform/proc-operator-external-db-gateway.adoc new file mode 100644 index 0000000000..7fb5de3fd0 --- /dev/null +++ b/downstream/modules/platform/proc-operator-external-db-gateway.adoc @@ -0,0 +1,98 @@ +[id="proc-operator-external-db-gateway"] + += Configuring an external database for {Gateway} on {OperatorPlatformName} + +[role="_abstract"] +There are two scenarios for deploying {PlatformNameShort} with an external database: + +[cols="a,a"] +|=== +| Scenario | Action required +| Fresh install | You must specify a single external database instance for the platform to use for the following: + +* {GatewayStart} +* {ControllerNameStart} +* {HubNameStart} +* {EDAName} +* {LightspeedShortName} (if enabled) + +See the _aap-configuring-external-db-all-default-components.yml_ example in the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/installing_on_openshift_container_platform/index#operator-crs[14.1. Custom resources] section for help with this. + +If using {LightspeedShortName}, use the _aap-configuring-external-db-with-lightspeed-enabled.yml_ example. + +| Existing external database in 2.4 | Your existing external database remains the same after upgrading, but you must specify the `external-postgres-configuration-gateway` (spec.database.database_secret) on the {PlatformNameShort} custom resource. +|=== + + +To deploy {PlatformNameShort} with an external database, you must first create a Kubernetes secret with credentials for connecting to the database. + +By default, the {OperatorPlatformNameShort} automatically creates and configures a managed PostgreSQL pod in the same namespace as your {PlatformNameShort} deployment. You can deploy {PlatformNameShort} with an external database instead of the managed PostgreSQL pod that the {OperatorPlatformNameShort} automatically creates. + +Using an external database lets you share and reuse resources and manually manage backups, upgrades, and performance optimizations. + +[NOTE] +==== +The same external database (PostgreSQL instance) can be used for {HubName}, {ControllerName}, and {Gateway} as long as the database names are different. In other words, you can have multiple databases with different names inside a single PostgreSQL instance. +==== + +The following section outlines the steps to configure an external database for your {Gateway} on the {OperatorPlatformNameShort}. + +.Prerequisite +The external database must be a PostgreSQL database that is the version supported by the current release of {PlatformNameShort}. + +[NOTE] +==== +{PlatformNameShort} {PlatformVers} supports {PostgresVers}. +==== + +.Procedure + +The external postgres instance credentials and connection information must be stored in a secret, which is then set on the {Gateway} spec. + +.
Create a `postgres_configuration_secret` YAML file, following the template below: ++ +---- +apiVersion: v1 +kind: Secret +metadata: + name: external-postgres-configuration + namespace: <1> +stringData: + host: "" <2> + port: "" <3> + database: "" + username: "" + password: "" <4> + type: "unmanaged" +type: Opaque +---- +<1> Namespace to create the secret in. This should be the same namespace you want to deploy to. +<2> The resolvable hostname for your database node. +<3> External port defaults to `5432`. +<4> Value for variable `password` should not contain single or double quotes (', ") or backslashes (\) to avoid any issues during deployment, backup or restoration. +// [Christian Adams] We can roll out a fix for it 3/12, then next async release for everything. It may be good to exclude step 5 for ssl mode here. We'll need to track adding that back in once the fix is in for the operator. - Removing point 5 here until a fix is implemented. +// <5> The variable `sslmode` is valid for `external` databases only. The allowed values are: `*prefer*`, `*disable*`, `*allow*`, `*require*`, `*verify-ca*`, and `*verify-full*`. +. Apply `external-postgres-configuration-secret.yml` to your cluster using the `oc create` command. ++ +---- +$ oc create -f external-postgres-configuration-secret.yml +---- ++ +[NOTE] +==== +The following example is for a {Gateway} deployment. +To configure an external database for all components, use the _aap-configuring-external-db-all-default-components.yml_ example in the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/installing_on_openshift_container_platform/index#operator-crs[14.1. Custom resources] section. +==== + +. When creating your `AnsibleAutomationPlatform` custom resource object, specify the secret on your spec, following the example below: ++ +---- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: example-aap + namespace: aap +spec: + database: + database_secret: external-postgres-configuration +---- diff --git a/downstream/modules/platform/proc-operator-external-db-hub.adoc b/downstream/modules/platform/proc-operator-external-db-hub.adoc index 3a6ade5028..aa45d526f0 100644 --- a/downstream/modules/platform/proc-operator-external-db-hub.adoc +++ b/downstream/modules/platform/proc-operator-external-db-hub.adoc @@ -1,35 +1,35 @@ [id="proc-operator-external-db-hub"] -= Configuring an external database for {HubName} on {PlatformName} operator += Configuring an external database for {HubName} on {OperatorPlatformName} [role="_abstract"] For users who prefer to deploy {PlatformNameShort} with an external database, they can do so by configuring a secret with instance credentials and connection information, then applying it to their cluster using the `oc create` command. -By default, the {PlatformName} operator automatically creates and configures a managed PostgreSQL pod in the same namespace as your {PlatformNameShort} deployment. +By default, the {OperatorPlatformNameShort} automatically creates and configures a managed PostgreSQL pod in the same namespace as your {PlatformNameShort} deployment. You can choose to use an external database instead if you prefer to use a dedicated node to ensure dedicated resources or to manually manage backups, upgrades, or performance tweaks. [NOTE] ==== -The same external database (PostgreSQL instance) can be used for both {HubName} and {ControllerName} as long as the database names are different.
In other words, you can have multiple databases with different names inside a single PostgreSQL instance. +The same external database (PostgreSQL instance) can be used for {HubName}, {ControllerName}, and {Gateway} as long as the database names are different. In other words, you can have multiple databases with different names inside a single PostgreSQL instance. ==== -The following section outlines the steps to configure an external database for your {HubName} on a {PlatformNameShort} operator. +The following section outlines the steps to configure an external database for your {HubName} on the {OperatorPlatformNameShort}. .Prerequisite The external database must be a PostgreSQL database that is the version supported by the current release of {PlatformNameShort}. [NOTE] ==== -{PlatformNameShort} {PlatformVers} supports PostgreSQL 13. +{PlatformNameShort} {PlatformVers} supports {PostgresVers}. ==== .Procedure The external postgres instance credentials and connection information will need to be stored in a secret, which will then be set on the {HubName} spec. -. Create a `postgres_configuration_secret` .yaml file, following the template below: +. Create a `postgres_configuration_secret` YAML file, following the template below: + ---- apiVersion: v1 @@ -47,7 +47,7 @@ stringData: type: "unmanaged" type: Opaque ---- -<1> Namespace to create the secret in. This should be the same namespace you wish to deploy to. +<1> Namespace to create the secret in. This should be the same namespace you want to deploy to. <2> The resolvable hostname for your database node. <3> External port defaults to `5432`. <4> Value for variable `password` should not contain single or double quotes (', ") or backslashes (\) to avoid any issues during deployment, backup or restoration. diff --git a/downstream/modules/platform/proc-operator-link-components.adoc b/downstream/modules/platform/proc-operator-link-components.adoc index a31d7dd978..def55d37b1 100644 --- a/downstream/modules/platform/proc-operator-link-components.adoc +++ b/downstream/modules/platform/proc-operator-link-components.adoc @@ -1,18 +1,32 @@ [id="operator-link-components_{context}"] -= Linking your components to the platform gateway += Linking your components to the {Gateway} -After installing the {OperatorPlatform} in your namespace you can set up your *AnsibleAutomationPlatform* instance. +After installing the {OperatorPlatformNameShort} in your namespace, you can set up your *{PlatformNameShort}* instance. Then link all the platform components to a single user interface. .Procedure -. Go to your {OperatorPlatform} and click btn:[Details]. -. On the *AnsibleAutomationPlatform* tile click btn:[Create instance]. -. From the *Create AnsibleAutomationPlatform* page enter a name for your instance in the *Name* field. + +. Log in to {OCP}. +. Navigate to menu:Operators[Installed Operators]. +. Select your {OperatorPlatformNameShort} deployment. +. Select the *Details* tab. + +. On the *{PlatformNameShort}* tile, click btn:[Create instance]. +. From the *Create {PlatformNameShort}* page, enter a name for your instance in the *Name* field. . Click btn:[YAML view] and paste the following: + ---- spec: + database: + resource_requirements: + requests: + cpu: 200m + memory: 512Mi + storage_requirements: + requests: + storage: 100Gi + controller: disabled: false @@ -22,14 +36,14 @@ spec: hub: disabled: false storage_type: file - file_storage_storage_class: nfs-local-rwx + file_storage_storage_class: file_storage_size: 10Gi ----- . Click btn:[Create].
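For context, the pasted snippet ends up inside a complete *{PlatformNameShort}* custom resource. The following is a minimal, abbreviated sketch of what the saved object can look like; the name `example-aap`, the namespace `aap`, and the storage class placeholder are illustrative assumptions, not required values:

----
apiVersion: aap.ansible.com/v1alpha1
kind: AnsibleAutomationPlatform
metadata:
  name: example-aap
  namespace: aap
spec:
  database:
    resource_requirements:
      requests:
        cpu: 200m
        memory: 512Mi
    storage_requirements:
      requests:
        storage: 100Gi
  controller:
    disabled: false
  hub:
    disabled: false
    storage_type: file
    file_storage_storage_class: <your-rwx-storage-class>
    file_storage_size: 10Gi
----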
.Verification -Go to your {OperatorPlatform} deployment and click btn:[All instances] to verify if all instances deployed correctly. -You should see the *AnsibleAutomationPlatform* instance and the deployed *AutomationController*, *EDA*, and *AutomationHub* instances here. +Go to your {OperatorPlatformNameShort} deployment and click btn:[All instances] to verify that all instances deployed correctly. +You should see the *{PlatformNameShort}* instance and the deployed *AutomationController*, *EDA*, and *AutomationHub* instances here. Alternatively, you can check from the command line by running: `oc get route` diff --git a/downstream/modules/platform/proc-operator-mesh-upgrading-receptors.adoc new file mode 100644 index 0000000000..6b7fcded22 --- /dev/null +++ b/downstream/modules/platform/proc-operator-mesh-upgrading-receptors.adoc @@ -0,0 +1,43 @@ +[id="proc-operator-mesh-upgrading-receptors"] + += Upgrading receptors + +A software update addresses issues or bugs to provide a better experience of working with the technology. Anyone with administrative rights can update the receptor on an execution node. + +Red{nbsp}Hat recommends updating the receptor after any {PlatformNameShort} control plane update to ensure that you are using the latest version. As a best practice, also perform regular receptor updates outside of control plane updates. + + +.Procedure + +. Check the current receptor version: ++ +---- +receptor --version +---- ++ +. Update the receptor: ++ +---- +sudo dnf update ansible-runner receptor -y +---- ++ +[NOTE] +==== +To upgrade all packages (not just the receptor), use `dnf update`, then reboot with `reboot`. +==== ++ +. Verify the installation. After the update is complete, check the receptor version again to verify the update: ++ +---- +receptor --version +---- ++ +. Restart the receptor service: ++ +---- +sudo systemctl restart receptor +---- ++ +. Ensure the receptor is working correctly and is properly connected to the controller or other nodes in the system. + + diff --git a/downstream/modules/platform/proc-operator-upgrade.adoc index 2bf9e97d62..79828e12ee 100644 --- a/downstream/modules/platform/proc-operator-upgrade.adoc +++ b/downstream/modules/platform/proc-operator-upgrade.adoc @@ -1,16 +1,28 @@ [id="upgrading-operator_{context}"] -= Upgrading the {OperatorPlatform} += Upgrading the {OperatorPlatformNameShort} +To upgrade to the latest version of {OperatorPlatformNameShort} on {OCPShort}, you can do the following: -[role=_abstract] +.Prerequisites -To upgrade to the latest version of {OperatorPlatform} on {OCPShort}, do the following: +* Read the link:{URLReleaseNotes}[{TitleReleaseNotes}] for 2.5. + +* Optional: For existing deployments, deploy all of your {PlatformName} services ({ControllerName}, {HubName}, {EDAName}) to the same, single namespace before upgrading to 2.5. For more information, see link:https://access.redhat.com/solutions/7092056[Migrating from one namespace to another]. +* Review the link:{URLOperatorBackup}[{TitleOperatorBackup}] guide and back up your services: +** AutomationControllerBackup +** AutomationHubBackup +** EDABackup .Procedure . Log in to {OCPShort}. . Navigate to menu:Operators[Installed Operators]. +. Select the {OperatorPlatformNameShort} installed on your project namespace. . Select the *Subscriptions* tab. -. Under *Upgrade status*, click btn:[Upgrade Available]. +.
Change the channel from `stable-2.4` to `stable-2.5`. An InstallPlan is created for you. . Click btn:[Preview InstallPlan]. . Click btn:[Approve]. +. Create a Custom Resource (CR) using the {PlatformNameShort} UI. +The {ControllerName} and {HubName} UIs remain until all SSO configuration is supported in the {Gateway} UI. + +For more information on configuring your updated {OperatorPlatformNameShort}, see xref:configure-aap-operator_operator-platform-doc[Configuring the {OperatorPlatformName} on {OCP}]. diff --git a/downstream/modules/platform/proc-post-migration-cleanup.adoc index a3f510099f..cf3542764b 100644 --- a/downstream/modules/platform/proc-post-migration-cleanup.adoc +++ b/downstream/modules/platform/proc-post-migration-cleanup.adoc @@ -4,16 +4,31 @@ [role=_abstract] -After your data migration is complete, you must delete any Instance Groups that are no longer required. +After data migration, delete unnecessary instance groups and unlink the old database configuration secret (`old_postgres_configuration_secret`) from the {ControllerName} resource definition. + +== Deleting instance groups post migration .Procedure . Log in to {PlatformName} as the administrator with the password you created during migration. + [NOTE] ==== -Note: If you did not create an administrator password during migration, one was automatically created for you. To locate this password, go to your project, select menu:Workloads[Secrets] and open controller-admin-password. From there you can copy the password and paste it into the {PlatformName} password field. +If you did not create an administrator password during migration, one was automatically created for you. +To locate this password, go to your project, select menu:Workloads[Secrets] and open `controller-admin-password`. +From there you can copy the password and paste it into the {PlatformName} password field. ==== + . Select {MenuInfrastructureInstanceGroups}. . Select all instance groups except `controlplane` and `default`. . Click btn:[Delete]. + +== Unlinking the old database configuration secret post migration + +. Log in to *{OCP}*. +. Navigate to menu:Operators[Installed Operators]. +. Select the {OperatorPlatformNameShort} installed on your project namespace. +. Select the *Automation Controller* tab. +. Click your *AutomationController* object. You can then view the object through the *Form view* or *YAML view*. The following inputs are available through the *YAML view*. +. Locate the `old_postgres_configuration_secret` item within the `spec` section of the YAML contents. +. Delete the line that contains this item. +. Click btn:[Save]. diff --git a/downstream/modules/platform/proc-preparing-the-rhel-host-for-containerized-installation.adoc index e95525966c..e33dadf22b 100644 --- a/downstream/modules/platform/proc-preparing-the-rhel-host-for-containerized-installation.adoc +++ b/downstream/modules/platform/proc-preparing-the-rhel-host-for-containerized-installation.adoc @@ -2,39 +2,59 @@ [id="preparing-the-rhel-host-for-containerized-installation_{context}"] -= Preparing the RHEL host for containerized installation += Preparing the {RHEL} host for containerized installation -[role="_abstract"] +Containerized {PlatformNameShort} runs the component services as Podman-based containers on top of a {RHEL} host. Prepare the {RHEL} host to ensure a successful installation.
.Procedure -Containerized {PlatformNameShort} runs the component services as podman based containers on top of a RHEL host. The installer takes care of this once the underlying host has been prepared. Use the following instructions for this. - -. Log into your RHEL host as your non-root user. +. Log in to the {RHEL} host as your non-root user. ++ +. Set a hostname that is a fully qualified domain name (FQDN): ++ +---- +sudo hostnamectl set-hostname <hostname> +---- ++ +. Register your {RHEL} host with `subscription-manager`: ++ +---- +sudo subscription-manager register +---- ++ -. Run *dnf repolist* to validate only the BaseOS and appstream repos are setup and enabled on the host: +. Run `sudo dnf repolist` to validate that only the BaseOS and AppStream repositories are set up and enabled on the host: + ---- -$ dnf repolist +$ sudo dnf repolist Updating Subscription Management repositories. repo id repo name rhel-9-for-x86_64-appstream-rpms Red Hat Enterprise Linux 9 for x86_64 - AppStream (RPMs) rhel-9-for-x86_64-baseos-rpms Red Hat Enterprise Linux 9 for x86_64 - BaseOS (RPMs) ---- + -. Ensure that these repos and only these repos are available to the host OS. If you need to know how to do this use this guide: -link:{BaseURL}/red_hat_enterprise_linux/9/html/managing_software_with_the_dnf_tool/assembly_managing-custom-software-repositories_managing-software-with-the-dnf-tool[Chapter 10. Managing custom software repositories Red Hat Enterprise Linux] +. Ensure that only these repositories are available to the {RHEL} host. For more information about managing custom repositories, see link:{BaseURL}/red_hat_enterprise_linux/9/html/managing_software_with_the_dnf_tool/assembly_managing-custom-software-repositories_managing-software-with-the-dnf-tool[Managing custom software repositories]. -. Ensure that the host has DNS configured and can resolve hostnames and IPs using a fully qualified domain name (FQDN). This is essential to ensure services can talk to one another. +. Ensure that the host has DNS configured and can resolve host names and IP addresses by using a fully qualified domain name (FQDN). This is essential to ensure services can talk to one another. -.Using unbound DNS - -To configure unbound DNS refer to link:{BaseURL}/red_hat_enterprise_linux/9/html/managing_networking_infrastructure_services/assembly_setting-up-an-unbound-dns-server_networking-infrastructure-services[Chapter 2. Setting up an unbound DNS server Red Hat Enterprise Linux 9]. - -.Using BIND DNS +. Install `ansible-core`: ++ +---- +sudo dnf install -y ansible-core +---- ++ +. Optional: You can install additional utilities that can be useful for troubleshooting purposes, for example `wget`, `git-core`, `rsync`, and `vim`: ++ +---- +sudo dnf install -y wget git-core rsync vim +---- -To configure DNS using BIND refer to link:{BaseURL}/red_hat_enterprise_linux/9/html/managing_networking_infrastructure_services/assembly_setting-up-and-configuring-a-bind-dns-server_networking-infrastructure-services[Chapter 1. Setting up and configuring a BIND DNS server Red Hat Enterprise Linux 9]. +. Optional: To have the installation program automatically pick up and apply your {PlatformNameShort} subscription manifest license, follow the steps in link:{URLCentralAuth}/assembly-gateway-licensing#assembly-aap-obtain-manifest-files[Obtaining a manifest file].
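As a quick sanity check before running the installer, you can confirm that the host reports an FQDN and that the name resolves in DNS. This is a minimal sketch, and the example commands use only standard RHEL utilities:

----
# Print the fully qualified domain name of the host
hostname -f

# Confirm that the FQDN resolves to an IP address
getent hosts "$(hostname -f)"
----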
-.Optional +[role="_additional-resources"] +.Additional resources +* For more information about registering your RHEL system, see link:{BaseURL}/subscription_central/1-latest/html-single/getting_started_with_rhel_system_registration/index[Getting Started with RHEL System Registration]. +* For information about configuring unbound DNS, see link:{BaseURL}/red_hat_enterprise_linux/9/html/managing_networking_infrastructure_services/assembly_setting-up-an-unbound-dns-server_networking-infrastructure-services[Setting up an unbound DNS server]. +* For information about configuring DNS using BIND, see link:{BaseURL}/red_hat_enterprise_linux/9/html/managing_networking_infrastructure_services/assembly_setting-up-and-configuring-a-bind-dns-server_networking-infrastructure-services[Setting up and configuring a BIND DNS server]. +* For more information about `ansible-core`, see link:https://docs.ansible.com/ansible/latest/[Ansible Core Documentation]. -To have the installer automatically pick up and apply your {PlatformNameShort} subscription manifest license, use this guide to generate a manifest file which can be downloaded for the installer: link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_operations_guide/assembly-aap-obtain-manifest-files[Chapter 2. Obtaining a manifest file Red Hat Ansible Automation Platform 2.]. diff --git a/downstream/modules/platform/proc-projects-using-collections-with-hub.adoc index 631b6a32bc..660cf193b4 100644 --- a/downstream/modules/platform/proc-projects-using-collections-with-hub.adoc +++ b/downstream/modules/platform/proc-projects-using-collections-with-hub.adoc @@ -17,8 +17,8 @@ Use the following procedure to connect to {PrivateHubName} or {HubName}, the onl . Create a credential by choosing one of the following options: .. To use {HubName}, create an {HubName} credential by using the copied token and pointing to the URLs shown in the *Server URL* and *SSO URL* fields of the token page: + -* *Galaxy Server URL* = `https://console.redhat.com/api/automation-hub/` -* *AUTH SEVER URL* = `https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token` +* *Galaxy Server URL* = `https://console.redhat.com/ansible/automation-hub/token` +//* *AUTH SERVER URL* = `https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token` + .. To use {PrivateHubName}, create an {HubName} credential using a token retrieved from the *Repo Management* dashboard of your {PrivateHubName} and pointing to the published repository URL as shown: //+ @@ -33,7 +33,7 @@ For each repository in {HubName}, you must create a different credential. + //image:projects-create-ah-credential.png[Create hub credential] + -For UI specific instructions, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/managing_content_in_automation_hub/managing-cert-valid-content[Red Hat Certified, validated, and Ansible Galaxy content in automation hub]. +For UI-specific instructions, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/managing_automation_content/managing-cert-valid-content[Red Hat Certified, validated, and Ansible Galaxy content in automation hub]. . Go to the organization that you want to synchronize content from and add the new credential to the organization. This enables you to associate each organization with the credential, or repository, that you want to use content from.
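With the credential in place, a project synchronization resolves collection dependencies from the configured server whenever the project contains a standard `collections/requirements.yml` file. The following is a minimal sketch; the collection name and version range are illustrative:

----
# collections/requirements.yml
collections:
  - name: ansible.posix
    version: ">=1.5.0"
----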
@@ -54,7 +54,7 @@ Then you can assign different levels of access to different organizations. For example, you can create a `Developers` organization that has access to both repositories, while an `Operations` organization has access to the *Prod* repository only. + -For UI specific instructions, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/managing_content_in_automation_hub/index#configuring-user-access-containers[Configuring user access for container repositories in private automation hub]. +For UI-specific instructions, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/managing_automation_content/index#configuring-user-access-containers[Configuring user access for container repositories in private automation hub]. . If {HubName} has self-signed certificates, use the toggle to enable the setting *Ignore Ansible Galaxy SSL Certificate Verification* in *Job Settings*. For {HubName}, which uses a signed certificate, use the toggle to disable it instead. This is a global setting: diff --git a/downstream/modules/platform/proc-provision-ocp-storage-with-readwritemany.adoc index 9d5440177b..1434a84dd0 100644 --- a/downstream/modules/platform/proc-provision-ocp-storage-with-readwritemany.adoc +++ b/downstream/modules/platform/proc-provision-ocp-storage-with-readwritemany.adoc @@ -3,10 +3,12 @@ = Provisioning OCP storage with `ReadWriteMany` access mode -To ensure successful installation of {OperatorPlatform}, you must provision your storage type for {HubName} initially to `ReadWriteMany` access mode. +To ensure successful installation of {OperatorPlatformNameShort}, you must provision your storage type for {HubName} initially to `ReadWriteMany` access mode. .Procedure -. Click link:{BaseURL}/openshift_container_platform/4.10/html-single/storage/index#persistent-storage-nfs-provisioning_persistent-storage-nfs[Provisioning] to update the access mode. +. Go to menu:Storage[PersistentVolume]. +. Click btn:[Create PersistentVolume]. . In the first step, update the `accessModes` from the default `ReadWriteOnce` to `ReadWriteMany`. +.. For a detailed overview, see link:{BaseURL}/openshift_container_platform/4.10/html-single/storage/index#persistent-storage-nfs-provisioning_persistent-storage-nfs[Provisioning]. . Complete the additional steps in this section to create the persistent volume claim (PVC). diff --git a/downstream/modules/platform/proc-pulling-the-secret.adoc index a05c8fa5ca..029449fbe3 100644 --- a/downstream/modules/platform/proc-pulling-the-secret.adoc +++ b/downstream/modules/platform/proc-pulling-the-secret.adoc @@ -19,14 +19,19 @@ oc create secret generic ee-pull-secret \ --from-literal=username=<username> \ --from-literal=password=<password> \ --from-literal=url=registry.redhat.io - -oc edit automationcontrollers ---- -. Add `ee_pull_credentials_secret` and `ee-pull-secret` to the specification using: +. Add `ee_pull_credentials_secret` and `ee-pull-secret` to the specification by editing the deployment specification: ++ +---- +oc edit automationcontrollers aap-controller -o yaml +---- ++ +and add the following: + ---- -spec.ee_pull_credentials_secret=ee-pull-secret +spec: + ee_pull_credentials_secret: ee-pull-secret ---- . To manage instances from the {ControllerName} UI, you must have System Administrator or System Auditor permissions.
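For reference, after this edit the relevant part of the `AutomationController` object reads as in the following minimal sketch; the `apiVersion` and the object name `aap-controller` reflect a typical deployment and are assumptions here:

----
apiVersion: automationcontroller.ansible.com/v1beta1
kind: AutomationController
metadata:
  name: aap-controller
spec:
  # References the secret created with `oc create secret generic ee-pull-secret`
  ee_pull_credentials_secret: ee-pull-secret
----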
diff --git a/downstream/modules/platform/proc-reinstalling-containerized-aap.adoc new file mode 100644 index 0000000000..9a1505ba90 --- /dev/null +++ b/downstream/modules/platform/proc-reinstalling-containerized-aap.adoc @@ -0,0 +1,13 @@ +:_mod-docs-content-type: PROCEDURE + +[id="reinstalling-containerized-aap_{context}"] += Reinstalling containerized {PlatformNameShort} + +[role="_abstract"] + +To reinstall a containerized deployment after uninstalling and preserving the database, run the `install` playbook and include the existing secret key value: +---- +$ ansible-playbook -i inventory ansible.containerized_installer.install -e controller_secret_key=<secret_key_value> +---- + +For more information about the `*_secret_key` variables, see link:{URLContainerizedInstall}/appendix-inventory-files-vars[Inventory file variables]. diff --git a/downstream/modules/platform/proc-restore-aap-container.adoc new file mode 100644 index 0000000000..a30d12395d --- /dev/null +++ b/downstream/modules/platform/proc-restore-aap-container.adoc @@ -0,0 +1,23 @@ +[id="proc-restore-aap-container"] + += Restoring container-based {PlatformNameShort} + +Restore your {ContainerBase} of {PlatformNameShort} from a backup. + +.Procedure + +. Go to the {PlatformName} installation directory on your {RHEL} host. + +. Run the `restore` playbook: ++ +---- +$ ansible-playbook -i <inventory> ansible.containerized_installer.restore +---- + +This restores the important data deployed by the containerized installer, such as: + +* PostgreSQL databases +* Configuration files +* Data files + +By default, the backup directory is set to `~/backups`. You can change this by using the `backup_dir` variable in your `inventory` file. \ No newline at end of file diff --git a/downstream/modules/platform/proc-run-jobs-on-execution-nodes.adoc index cdbc91779a..0e324fb63b 100644 --- a/downstream/modules/platform/proc-run-jobs-on-execution-nodes.adoc +++ b/downstream/modules/platform/proc-run-jobs-on-execution-nodes.adoc @@ -6,7 +6,7 @@ You must specify where jobs are run, or they default to running in the control c To do this, set up a Job Template. -For more information on Job Templates, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-job-templates[Job Templates] in the _{ControllerUG}_. +For more information on Job Templates, see link:{URLControllerUserGuide}/controller-job-templates[Job templates] in _{ControllerUG}_. .Procedure diff --git a/downstream/modules/platform/proc-running-setup-script-for-updates.adoc index 255ef177c6..8f6cf97e25 100644 --- a/downstream/modules/platform/proc-running-setup-script-for-updates.adoc +++ b/downstream/modules/platform/proc-running-setup-script-for-updates.adoc @@ -1,4 +1,4 @@ -// [id="proc-running-setup-script-for-updates_{context}"] +[id="proc-running-setup-script-for-updates"] = Running the {PlatformName} installer setup script @@ -7,10 +7,15 @@ You can run the setup script once you have finished updating the `inventory` fil .Procedure -. Run the `setup.sh` script +* Run the `setup.sh` script: + ----- $ ./setup.sh ----- -The installation will begin. +The installation will begin.
+ +[role="_additional-resources"] +.Next steps +If you are upgrading from {PlatformNameShort} 2.4 to 2.5, proceed to +xref:account-linking_aap-post-upgrade[Linking your account] to link your existing service-level accounts to a single unified platform account. diff --git a/downstream/modules/platform/proc-running-setup-script.adoc index c43a9e4a53..f315a59cec 100644 --- a/downstream/modules/platform/proc-running-setup-script.adoc +++ b/downstream/modules/platform/proc-running-setup-script.adoc @@ -3,7 +3,7 @@ = Running the {PlatformName} installer setup script [role="_abstract"] -After you update the inventory file with required parameters for installing your {PrivateHubName}, run the installer setup script. +After you update the inventory file with required parameters, run the installer setup script. .Procedure @@ -13,4 +13,20 @@ After you update the inventory file with required parameters for installing your $ sudo ./setup.sh ----- +[NOTE] +==== +If you are running the setup as a non-root user with `sudo` privileges, you can use the following command: +---- +$ ANSIBLE_BECOME_METHOD='sudo' ANSIBLE_BECOME=True ./setup.sh +---- +==== + Installation of {PlatformName} will begin. + +.Additional resources +See link:https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_privilege_escalation.html[Understanding privilege escalation] for additional `setup.sh` script examples. + +ifdef::mesh-VM[] +If you want to add additional nodes to your {AutomationMesh} after the initial setup, edit the inventory file to add the new node, then rerun the `setup.sh` script. +endif::mesh-VM[] diff --git a/downstream/modules/platform/proc-scm-git-subversion.adoc index e860ca9abc..8fe7901190 100644 --- a/downstream/modules/platform/proc-scm-git-subversion.adoc +++ b/downstream/modules/platform/proc-scm-git-subversion.adoc @@ -6,39 +6,38 @@ . From the navigation panel, select {MenuAEProjects}. . Click the project name you want to use. . In the project *Details* tab, click btn:[Edit project]. -. Select the appropriate option (Git or Subversion) from the *Source Control Type* menu. +. Select the appropriate option (Git or Subversion) from the *Source control type* menu. + //image:projects-create-scm-project.png[Select scm] . Enter the appropriate details into the following fields: -* *Source Control URL* - See an example in the tooltip . -* Optional: *Source Control Branch/Tag/Commit*: Enter the SCM branch, tags, commit hashes, arbitrary refs, or revision number (if applicable) from the source control (Git or Subversion) to checkout. -Some commit hashes and references might not be available unless you also provide a custom refspec in the next field. +* *Source control URL* - See an example in the tooltip. +* Optional: *Source control branch/tag/commit*: Enter the SCM branch, tags, commit hashes, arbitrary refs, or revision number (if applicable) from the source control (Git or Subversion) to check out. +Some commit hashes and references might not be available unless you also give a custom refspec in the next field. If left blank, the default is `HEAD`, which is the last checked out Branch, Tag, or Commit for this project. -* *Source Control Refspec* - This field is an option specific to git source control and only advanced users familiar and comfortable with git should specify which references to download from the remote repository.
+* *Source control refspec* - This field is an option specific to git source control, and only advanced users familiar and comfortable with git should specify which references to download from the remote repository. For more information, see xref:controller-job-branch-overriding[Job branch overriding]. -* *Source Control Credential* - If authentication is required, select the appropriate source control credential. +* *Source control credential* - If authentication is required, select the appropriate source control credential. . Optional: *Options* - select the launch behavior, if applicable: * *Clean* - Removes any local modifications before performing an update. * *Delete* - Deletes the local repository in its entirety before performing an update. Depending on the size of the repository this can significantly increase the amount of time required to complete an update. * *Track submodules* - Tracks the latest commit. There is more information in the tooltip image:question_circle.png[Tooltip,15,15]. -* *Update Revision on Launch* - Updates the revision of the project to the current revision in the remote source control, and caching the roles directory from link:https://docs.ansible.com/automation-controller/latest/html/userguide/projects.html#ug-galaxy[Galaxy] or -xref:ref-projects-collections-support[Collections support]. +* *Update revision on launch* - Updates the revision of the project to the current revision in the remote source control, and caches the roles directory from xref:ref-projects-galaxy-support[Ansible Galaxy support] or xref:ref-projects-collections-support[Collections support]. {ControllerNameStart} ensures that the local revision matches and that the roles and collections are up-to-date with the last update. In addition, to avoid job overflows if jobs are spawned faster than the project can synchronize, selecting this enables you to configure a Cache Timeout to cache previous project synchronizations for a given number of seconds. -* *Allow Branch Override* - Enables a job template or an inventory source that uses this project to start with a specified SCM branch or revision other than that of the project. +* *Allow branch override* - Enables a job template or an inventory source that uses this project to start with a specified SCM branch or revision other than that of the project. For more information, see xref:controller-job-branch-overriding[Job branch overriding]. + -image:projects-create-scm-project-branch-override-checked.png[Override options] +//image:projects-create-scm-project-branch-override-checked.png[Override options] . Click btn:[Save project]. -[TIP] -==== -Using a GitHub link is an easy way to use a playbook. -To help get you started, use the `helloworld.yml` file available link:https://github.com/ansible/tower-example.git[here]. +//[TIP] +//==== +//Using a GitHub link is an easy way to use a playbook. +//To help get you started, use the `helloworld.yml` file available link:https://github.com/ansible/tower-example.git[here]. -This link offers a very similar playbook to the one created manually in the instructions found in link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_controller/index[{ControllerGS}]. -Using it will not alter or harm your system in any way. -==== +//This link offers a very similar playbook to the one created manually in the instructions found in link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_automation_controller/index[{ControllerGS}].
+//Using it will not alter or harm your system in any way. +//==== diff --git a/downstream/modules/platform/proc-scm-insights.adoc b/downstream/modules/platform/proc-scm-insights.adoc index aac9bdc3cf..00702beef7 100644 --- a/downstream/modules/platform/proc-scm-insights.adoc +++ b/downstream/modules/platform/proc-scm-insights.adoc @@ -7,14 +7,14 @@ . Click the project name you want to use. . In the project *Details* tab, click btn:[Edit project]. . Select *Red Hat Insights* from the *Source Control Type* menu. -. In the *Credential* field, select the appropriate credential for use with Insights, as Red Hat Insights requires a credential for authentication. +. In the *Insights credential* field, select the appropriate credential for use with Insights, as Red Hat Insights requires a credential for authentication. . Optional: In the *Options* field, select the launch behavior, if applicable: * *Clean* - Removes any local modifications before performing an update. * *Delete* - Deletes the local repository in its entirety before performing an update. Depending on the size of the repository this can significantly increase the amount of time required to complete an update. -* *Update Revision on Launch* - Updates the revision of the project to the current revision in the remote source control, and caches the +* *Update revision on launch* - Updates the revision of the project to the current revision in the remote source control, and caches the roles directory from xref:ref-projects-galaxy-support[{Galaxy} support] or xref:ref-projects-collections-support[Collections support]. {ControllerNameStart} ensures that the local revision matches, and that the roles and collections are up-to-date. If jobs are spawned faster than the project can synchronize, selecting this enables you to configure a Cache Timeout to diff --git a/downstream/modules/platform/proc-scm-remote-archive.adoc b/downstream/modules/platform/proc-scm-remote-archive.adoc index ed5bb7b287..a443f5c97f 100644 --- a/downstream/modules/platform/proc-scm-remote-archive.adoc +++ b/downstream/modules/platform/proc-scm-remote-archive.adoc @@ -9,20 +9,20 @@ containing all the requirements for that project in a single archive. . From the navigation panel, select {MenuAEProjects}. . Click the project name you want to use. . In the project *Details* tab, click btn:[Edit project]. -. Select *Remote Archive* from the *Source Control Type* menu. +. Select *Remote Archive* from the *Source control type* menu. . Enter the appropriate details into the following fields: -* *Source Control URL* - requires a URL to a remote archive, such as a _GitHub Release_ or a build artifact stored in _Artifactory_ and unpacks it into +* *Source control URL* - requires a URL to a remote archive, such as a _GitHub Release_ or a build artifact stored in _Artifactory_ and unpacks it into the project path for use. -* *Source Control Credential* - If authentication is required, select the appropriate source control credential. +* *Source control credential* - If authentication is required, select the appropriate source control credential. . Optional: In the *Options* field, select the launch behavior, if applicable: * *Clean* - Removes any local modifications before performing an update. * *Delete* - Deletes the local repository in its entirety before performing an update. Depending on the size of the repository this can significantly increase the amount of time required to complete an update. -* *Update Revision on Launch* - Not recommended. 
This option updates the revision of the project to the current revision in the remote source control, and caches the roles directory from xref:ref-projects-galaxy-support[{Galaxy} support] or xref:ref-projects-collections-support[Collections support]. -* *Allow Branch Override* - Not recommended. This option enables a job template that uses this project to launch with a specified SCM branch or revision other than that of the project's. +* *Update revision on launch* - Not recommended. This option updates the revision of the project to the current revision in the remote source control, and caches the roles directory from xref:ref-projects-galaxy-support[{Galaxy} support] or xref:ref-projects-collections-support[Collections support]. +* *Allow branch override* - Not recommended. This option enables a job template that uses this project to launch with a specified SCM branch or revision other than that of the project. + //image:projects-create-scm-rm-archive.png[Remote archived project] + diff --git a/downstream/modules/platform/proc-set-registry-username-password.adoc new file mode 100644 index 0000000000..1fa74ba030 --- /dev/null +++ b/downstream/modules/platform/proc-set-registry-username-password.adoc @@ -0,0 +1,27 @@ +[id="proc-set-registry-username-password"] + += Setting registry_username and registry_password + +When using the `registry_username` and `registry_password` variables for an online non-bundled installation, you need to create a new registry service account. + +Registry service accounts are named tokens that can be used in environments where credentials are shared, such as deployment systems. + +.Procedure +. Go to https://access.redhat.com/terms-based-registry/accounts. +. On the *Registry Service Accounts* page, click btn:[New Service Account]. +. Enter a name for the account using only the allowed characters. +. Optional: Enter a description for the account. +. Click btn:[Create]. +. Find the created account in the list by searching for your name in the search field. +. Click the name of the account that you created. +. Alternatively, if you know the name of your token, you can go directly to the page by entering the URL: ++ +---- +https://access.redhat.com/terms-based-registry/token/<token_name> +---- ++ +. A *token* page opens, displaying a generated username (different from the account name) and a token. ++ +If no token is displayed, click btn:[Regenerate Token]. You can also click this to generate a new username and token. +. Copy the username (for example "1234567|testuser") and use it to set the variable `registry_username`. +. Copy the token and use it to set the variable `registry_password`. diff --git a/downstream/modules/platform/proc-set-up-virtual-machines.adoc index 7ceda4aaec..bc78f82fb9 100644 --- a/downstream/modules/platform/proc-set-up-virtual-machines.adoc +++ b/downstream/modules/platform/proc-set-up-virtual-machines.adoc @@ -41,22 +41,40 @@ For more information about Simple Content Access, see link:{BaseURL}/subscriptio .
Enable {PlatformNameShort} subscriptions and the proper {PlatformName} channel: + +For RHEL 8: ++ ---- -# subscription-manager repos --enable ansible-automation-platform-2.4-for-rhel-8-x86_64-rpms for RHEL 8 - -# subscription-manager repos --enable ansible-automation-platform-2.4-for-rhel-9-x86_64-rpms for RHEL 9 +# subscription-manager repos --enable ansible-automation-platform-2.5-for-rhel-8-x86_64-rpms ---- - ++ +For RHEL 9: ++ +---- +# subscription-manager repos --enable ansible-automation-platform-2.5-for-rhel-9-x86_64-rpms +---- ++ +For ARM: ++ +---- +# subscription-manager repos --enable ansible-automation-platform-2.5-for-rhel-aarch64-rpms +---- ++ . Ensure the packages are up to date: + ---- sudo dnf upgrade -y ---- -. Install the ansible-core packages: +. Install the `ansible-core` packages on the machine where the downloaded bundle is to run: + ---- sudo dnf install -y ansible-core ---- - ++ +[NOTE] +==== +Ansible core is required on the machine that runs the {AutomationMesh} configuration bundle playbooks. This document assumes that happens on the execution node. +However, you can omit this step if you run the playbook from a different machine. +You cannot run the playbook directly from the control node; this is not currently supported. Future support expects the control node to have direct connectivity to the execution node. +==== diff --git a/downstream/modules/platform/proc-settings-gw-additional-options.adoc new file mode 100644 index 0000000000..b3bc400b8d --- /dev/null +++ b/downstream/modules/platform/proc-settings-gw-additional-options.adoc @@ -0,0 +1,34 @@ +[id="proc-settings-gw-other-options"] + += Configuring additional platform options + +//Content divided into multiple procedures to address issue AAP-30592 + +From the *{GatewayStart} settings* page, you can configure additional platform options. + +.Procedure +. From the navigation panel, select {MenuSetGateway}. +. The *{GatewayStart} settings* page is displayed. +. Click btn:[Edit {Gateway} settings]. +. You can configure the following *Other settings*: ++ +* *Jwt expiration buffer in seconds*: The number of seconds before a JWT token's expiration to revoke from the cache. ++ +When authentication happens, a JWT token is created for the user and that token is cached. +When subsequent calls happen to services such as {ControllerName} or {EDAName}, the token is taken from the cache and sent to the service. +Both the token and the cache of the token have an expiration time. +If the token expires while it is in the cache, authentication attempts result in a 401 (unauthorized) error. +This setting gives {PlatformName} a buffer by removing the JWT token from the cache before the token expires. +When a token is revoked from the cache, a new token with a new expiration is generated and cached for the user. +As a result, expired tokens from the cache are never used. +This setting defaults to 2 seconds. +If you have high latency between the {Gateway} and your services and observe 401 responses, increase this setting to reduce the number of 401 responses. +* *Status endpoint backend timeout seconds*: Timeout (in seconds) for the status endpoint to wait when trying to connect to a backend. +* *Status endpoint backend verify*: Specifies whether SSL certificates of the services are verified when calling individual nodes for statuses. +* *Request timeout*: Specifies, in seconds, the length of time before the proxy reports a timeout and generates a 504 error.
+* *Allow external users to create OAuth2 tokens*: For security reasons, users from external authentication providers, such as LDAP, SAML, SSO, Radius, and others, are not allowed to create OAuth2 tokens. +To change this behavior, enable this setting. +Existing tokens are not deleted when this setting is turned off. ++ +. Click btn:[Save {Gateway} settings] to save the changes or proceed to configure the other platform options available. + diff --git a/downstream/modules/platform/proc-settings-gw-custom-login.adoc new file mode 100644 index 0000000000..c57bb531e3 --- /dev/null +++ b/downstream/modules/platform/proc-settings-gw-custom-login.adoc @@ -0,0 +1,18 @@ +[id="proc-settings-gw-custom-login"] + += Configuring a custom platform login + +//Content divided into multiple procedures to address issue AAP-30592 + +From the *{GatewayStart} settings* page, you can configure the custom login options. + +.Procedure +. From the navigation panel, select {MenuSetGateway}. +. The *{GatewayStart} settings* page is displayed. +. To configure the options, click btn:[Edit {Gateway} settings]. +. You can configure the following *Custom Login* options: ++ +* *Custom login info*: Provide specific information (such as a legal notice or a disclaimer) to a text box in the login modal. For example, you can include a company banner with a statement such as “This is only to be used for ``, etc.” +* *Custom logo*: Provide an image file for setting up a custom logo (must be a data URL with a base64-encoded GIF, PNG, or JPEG image). ++ +. Click btn:[Save {Gateway} settings] to save the changes or proceed to configure the other platform options available. diff --git a/downstream/modules/platform/proc-settings-gw-password-security.adoc new file mode 100644 index 0000000000..2cdeb10b4c --- /dev/null +++ b/downstream/modules/platform/proc-settings-gw-password-security.adoc @@ -0,0 +1,21 @@ +[id="proc-settings-gw-password-security"] + += Configuring a platform password security policy + +//Content divided into multiple procedures to address issue AAP-30592 + +From the *{GatewayStart} settings* page, you can configure a password security policy. + +.Procedure +. From the navigation panel, select {MenuSetGateway}. +. The *{GatewayStart} settings* page is displayed. +. To configure the options, click btn:[Edit {Gateway} settings]. +. You can configure the following *Password Security* options: ++ +* *Password minimum uppercase letters*: The minimum number of uppercase characters required in a local password. +* *Password minimum length*: The minimum length of a local password. +* *Password minimum numerical digits*: The minimum number of numerical characters required in a local password. +* *Password minimum special characters*: The minimum number of special characters required in a local password. ++ +. Click btn:[Save {Gateway} settings] to save the changes or proceed to configure the other platform options available.
+ diff --git a/downstream/modules/platform/proc-settings-gw-security-options.adoc new file mode 100644 index 0000000000..e4ec750186 --- /dev/null +++ b/downstream/modules/platform/proc-settings-gw-security-options.adoc @@ -0,0 +1,50 @@ +[id="proc-settings-gw-security-options"] + += Configuring platform security + +//Content divided into multiple procedures to address issue AAP-30592 + +From the *{GatewayStart} settings* page, you can configure platform security settings. + +.Procedure +. From the navigation panel, select {MenuSetGateway}. +. The *{GatewayStart} settings* page is displayed. +. To configure the options, click btn:[Edit]. +. You can configure the following *Security* settings: ++ +* *Allow admin to set insecure*: Whether a superuser account can save an insecure password when editing any local user account. +* *Gateway basic auth enabled*: Enable basic authentication to the {Gateway} API. ++ +Turning this off prevents all basic authentication (local users), so customers need to make sure they have their alternative authentication mechanisms correctly configured before doing so. ++ +Turning it off with only local authentication configured also prevents all access to the UI. ++ +* *Social auth username is full email*: Enabling this setting directs social authentication to use the full email address as the username instead of the full name. ++ +* *Gateway token name*: The header name to push from the proxy to the backend service. ++ +[WARNING] +==== +If this name is changed, backends must be updated to compensate. +==== ++ +* *Gateway access token expiration*: How long access tokens remain valid. +* *Jwt private key*: The private key used to encrypt the JWT tokens sent to backend services. ++ +This should be a private RSA key and one should be generated automatically on installation. ++ +[NOTE] +==== +Use caution when rotating the key, as doing so causes current sessions to fail until their JWT keys are reset. +==== ++ +* (Read only) *Jwt public key*: The public key used to validate the JWT tokens sent to backend services. ++ +This is the public half of the RSA key pair and is generated automatically on installation. ++ +[NOTE] +==== +See other services' documentation on how they consume this key. +==== ++ +. Click btn:[Save changes] to save the changes or proceed to configure the other platform options available. \ No newline at end of file diff --git a/downstream/modules/platform/proc-settings-gw-session-options.adoc new file mode 100644 index 0000000000..a6e537a2e1 --- /dev/null +++ b/downstream/modules/platform/proc-settings-gw-session-options.adoc @@ -0,0 +1,15 @@ +[id="proc-settings-gw-session-options"] + += Configuring platform sessions + +//Content divided into multiple procedures to address issue AAP-30592 + +From the *{GatewayStart} settings* page, you can configure platform session settings. + +.Procedure +. From the navigation panel, select {MenuSetGateway}. +. The *{GatewayStart} settings* page is displayed. +. To configure the options, click btn:[Edit {Gateway} settings]. +. Enter the time in seconds before a session expires in the *Session cookie age* field. +. Click btn:[Save {Gateway} settings] to save the changes or proceed to configure the other platform options available.
+ diff --git a/downstream/modules/platform/proc-settings-platform-gateway.adoc new file mode 100644 index 0000000000..6f51652f29 --- /dev/null +++ b/downstream/modules/platform/proc-settings-platform-gateway.adoc @@ -0,0 +1,26 @@ +[id="proc-settings-platform-gateway"] + += {GatewayStart} + +//To be added to Donna's AAP/UI document for 2.5 +//Content divided into multiple procedures to address issue AAP-30592 + +The {Gateway} is the service that handles authentication and authorization for {PlatformNameShort}. +It provides a single ingress into the platform and serves the platform's user interface. + +From the {MenuSetGateway} menu, you can configure *{GatewayStart}*, +*Security*, *Session*, *Platform Security*, *Custom Login*, and *Other* settings. + +.Procedure +. From the navigation panel, select {MenuSetGateway}. +. The *{GatewayStart} settings* page is displayed. +//[Removing screen captures but they can be added back if requested.] +//image::platform_gateway_settings_page.png[Initial {Gateway} settings page] +. To configure the options, click btn:[Edit {Gateway} settings]. +//image::platform_gateway_full.png[{GatewayStart} configurable options] +. You can configure the following {Gateway} options: ++ +* *{GatewayStart} proxy url*: URL to the {Gateway} proxy layer. +* *{GatewayStart} proxy url ignore cert*: Ignore the certificate when connecting to the {Gateway} proxy layer. ++ +. Click btn:[Save {Gateway} settings] to save the changes or proceed to configure the other platform options available. \ No newline at end of file diff --git a/downstream/modules/platform/proc-settings-troubleshooting.adoc new file mode 100644 index 0000000000..8de0cf7a6e --- /dev/null +++ b/downstream/modules/platform/proc-settings-troubleshooting.adoc @@ -0,0 +1,20 @@ +[id="proc-settings-troubleshooting"] + +//To be added to Donna's AAP/UI document for 2.5 += Troubleshooting options + +You can use the *Troubleshooting* page to enable or disable certain flags that aid in debugging issues within {PlatformNameShort}. + +.Procedure +. From the navigation panel, select {MenuSetTroubleshooting}. +. The *Troubleshooting* page is displayed. +. Click btn:[Edit]. +//[ddacosta] Removing screen captures but they can be added back if requested. +//image::troubleshooting_options.png[Troubleshooting options] +. You can select the following options: ++ +* *Enable or Disable tmp dir cleanup*: Select this to enable or disable the cleanup of temporary directories generated during job execution after the job completes. +* *Debug Web Requests*: Select this to enable or disable web request profiling for debugging slow web requests. +* *Release Receptor Work*: Select this to turn on or off the deletion of job pods after they complete or fail. This can be helpful in debugging why a job failed. +* *Keep receptor work on error*: Select this to prevent receptor work from being released when an error is detected. +. Click btn:[Save] to save your changes.
diff --git a/downstream/modules/platform/proc-settings-user-preferences.adoc new file mode 100644 index 0000000000..79a25735f9 --- /dev/null +++ b/downstream/modules/platform/proc-settings-user-preferences.adoc @@ -0,0 +1,50 @@ +[id="proc-settings-user-preferences"] + += User preferences + +//To be added to Donna's AAP/UI document for 2.5 + +You can use the *User preferences* page to customize your platform experience. Use this menu to control theming, layout options, and formatting. + +[NOTE] +==== +User preferences are stored locally in your browser. This means that they are unique to you and your machine. +==== + +.Procedure + +. From the navigation panel, select {MenuSetUserPref}. +. The *User Preferences* page is displayed. +. Click btn:[Edit]. +. You can configure the following options: ++ +* *Refresh interval*: Select the refresh interval for the page. ++ +This refreshes the data on the page at the selected interval. ++ +The refresh happens in the background and does not reload the page. ++ +* *Color theme*: Select from: +** Dark theme +** Light theme +** System default ++ +* *Table layout*: Select from: +** Comfortable +** Compact ++ +* *Form columns*: Select from: +** Multiple columns of inputs +** Single column of inputs +//[ddacosta] 9/20/24 Form labels is no longer in the UI +//* *Form Labels*: Select from: +//** Labels above inputs +//** Labels beside inputs ++ +* *Date format*: Select from: +** Shows dates *Relative* to the current time +** Shows dates as *Date and time* ++ +* *Preferred data format*: Sets the default format for editing and displaying data. ++ +. Click btn:[Save user preferences]. diff --git a/downstream/modules/platform/proc-setup-postgresql-ext-database-containerized.adoc new file mode 100644 index 0000000000..dea1597382 --- /dev/null +++ b/downstream/modules/platform/proc-setup-postgresql-ext-database-containerized.adoc @@ -0,0 +1,132 @@ +[id="proc-setup-postgresql-ext-database-containerized"] + += Setting up an external (customer supported) database + +[IMPORTANT] +==== +* When using an external database with {PlatformNameShort}, you must create and maintain that database. Ensure that you clear your external database when uninstalling {PlatformNameShort}. + +* {PlatformName} {PlatformVers} uses {PostgresVers} and requires the external (customer supported) databases to have ICU support. + +* During configuration of an external database, you must check the external database coverage. For more information, see link:https://access.redhat.com/articles/4010491[{PlatformName} Database Scope of Coverage]. +==== + +There are two possible scenarios for setting up an external database: + +. An external database with PostgreSQL admin credentials +. An external database without PostgreSQL admin credentials + +== Setting up an external database with PostgreSQL admin credentials + +If you have PostgreSQL admin credentials, you can supply them in the inventory file, and the installation program creates the PostgreSQL users and databases for each component for you. The PostgreSQL admin account must have `SUPERUSER` privileges.
+
+To configure the PostgreSQL admin credentials, add the following variables to the inventory file under the `[all:vars]` group:
+
+----
+postgresql_admin_username=<username>
+postgresql_admin_password=<password>
+----
+
+== Setting up an external database without PostgreSQL admin credentials
+
+If you do not have PostgreSQL admin credentials, you must create the PostgreSQL users and databases for each component ({Gateway}, {ControllerName}, {HubName}, and {EDAName}) before running the installation program.
+
+.Procedure
+
+. Connect to a PostgreSQL compliant database server with a user that has `SUPERUSER` privileges.
++
+----
+# psql -h <hostname> -U <username> -p <port>
+----
++
+For example:
++
+----
+# psql -h db.example.com -U superuser -p 5432
+----
++
+. Create the user with a password and ensure the `CREATEDB` role is assigned to the user. For more information, see link:https://www.postgresql.org/docs/13/user-manag.html[Database Roles].
++
+----
+CREATE USER <username> WITH PASSWORD '<password>' CREATEDB;
+----
++
+For example:
++
+----
+CREATE USER hub_user WITH PASSWORD '<password>' CREATEDB;
+----
++
+. Create the database and add the user you created as the owner.
++
+----
+CREATE DATABASE <database_name> OWNER <username>;
+----
++
+For example:
++
+----
+CREATE DATABASE hub_database OWNER hub_user;
+----
++
+. When you have created the PostgreSQL users and databases for each component, you can supply them in the inventory file under the `[all:vars]` group.
++
+[source,yaml,subs="+attributes"]
+----
+# {GatewayStart}
+gateway_pg_host=aap.example.org
+gateway_pg_database=<database_name>
+gateway_pg_username=<username>
+gateway_pg_password=<password>
+
+# {ControllerNameStart}
+controller_pg_host=aap.example.org
+controller_pg_database=<database_name>
+controller_pg_username=<username>
+controller_pg_password=<password>
+
+# {HubNameStart}
+hub_pg_host=aap.example.org
+hub_pg_database=<database_name>
+hub_pg_username=<username>
+hub_pg_password=<password>
+
+# {EDAName}
+eda_pg_host=aap.example.org
+eda_pg_database=<database_name>
+eda_pg_username=<username>
+eda_pg_password=<password>
+----
+
+include::proc-enable-hstore-extension.adoc[leveloffset=+1]
+
+== Optional: enabling mutual TLS (mTLS) authentication
+
+mTLS authentication is disabled by default. To configure each component's database with mTLS authentication, add the following variables to your inventory file under the `[all:vars]` group and ensure each component has a different TLS certificate and key:
+
+[source,yaml,subs="+attributes"]
+----
+# {GatewayStart}
+gateway_pg_cert_auth=true
+gateway_pg_tls_cert=/path/to/gateway.cert
+gateway_pg_tls_key=/path/to/gateway.key
+gateway_pg_sslmode=verify-full
+
+# {ControllerNameStart}
+controller_pg_cert_auth=true
+controller_pg_tls_cert=/path/to/awx.cert
+controller_pg_tls_key=/path/to/awx.key
+controller_pg_sslmode=verify-full
+
+# {HubNameStart}
+hub_pg_cert_auth=true
+hub_pg_tls_cert=/path/to/pulp.cert
+hub_pg_tls_key=/path/to/pulp.key
+hub_pg_sslmode=verify-full
+
+# {EDAName}
+eda_pg_cert_auth=true
+eda_pg_tls_cert=/path/to/eda.cert
+eda_pg_tls_key=/path/to/eda.key
+eda_pg_sslmode=verify-full
+----
diff --git a/downstream/modules/platform/proc-setup-postgresql-ext-database.adoc b/downstream/modules/platform/proc-setup-postgresql-ext-database.adoc index fcf250311e..c8c8e69fb3 100644 --- a/downstream/modules/platform/proc-setup-postgresql-ext-database.adoc +++ b/downstream/modules/platform/proc-setup-postgresql-ext-database.adoc @@ -4,36 +4,37 @@
 [IMPORTANT]
 ====
-Red Hat does not support the use of external (customer supported) databases, however they are used by customers.
-The following guidance on inital configuration, from a product installation perspective only, is provided to avoid related support requests.
+* When using an external database with {PlatformNameShort}, you must create and maintain that database. Ensure that you clear your external database when uninstalling {PlatformNameShort}.
+
+* {PlatformName} {PlatformVers} uses {PostgresVers} and requires the external (customer supported) databases to have ICU support.
 ====
-To create a database, user and password on an external PostgreSQL compliant database for use with {ControllerName}, use the following procedure.
+{PlatformName} {PlatformVers} uses {PostgresVers} and requires the external (customer supported) databases to have ICU support. Use the following procedure to configure an external PostgreSQL compliant database for use with an {PlatformNameShort} component, for example {ControllerName}, {EDAName}, {HubName}, and {Gateway}.
 .Procedure
-. Install and then connect to a PostgreSQL compliant database server with superuser privileges.
+. Connect to a PostgreSQL compliant database server with superuser privileges.
+
----
# psql -h <hostname> -U superuser -p 5432 -d postgres :
----
+
-Where:
+. Where the default value for `<hostname>` is *hostname*:
+
----
-h hostname
--host=hostname
----
+
-Specifies the host name of the machine on which the server is running.
-If the value begins with a slash, it is used as the directory for the Unix-domain socket.
+. Specify the hostname of the machine on which the server is running.
+If the value begins with a slash, it is used as the directory for the UNIX-domain socket.
+
----
-d dbname
--dbname=dbname
----
+
-Specifies the name of the database to connect to.
-This is equivalent to specifying `dbname` as the first non-option argument on the command line.
+. Specify the name of the database to connect to.
+This is equivalent to specifying `dbname` as the first non-option argument on the command line.
The `dbname` can be a connection string.
If so, connection string parameters override any conflicting command line options.
+
@@ -42,31 +43,115 @@ If so, connection string parameters override any conflicting command line option
 --username=username
----
+
-Connect to the database as the user `username` instead of the default. (You must have permission to do so.)
+. Connect to the database as the user `username` instead of the default (you must have permission to do so).
-. Create the user, database, and password with the `createDB` or administrator role assigned to the user.
+. Create the user, database, and password with the `createDB` or `administrator` role assigned to the user.
For further information, see link:https://www.postgresql.org/docs/13/user-manag.html[Database Roles].
-. Add the database credentials and host details to the {ControllerName} inventory file as an external database.
+. Add the database credentials and host details to the installation program's inventory file under the `[all:vars]` group.
+
-The default values are used in the following example.
+.Without mutual TLS (mTLS) authentication to the database
+Use the following inventory file snippet to configure each component's database without mTLS authentication. Uncomment the configuration you need.
+ + +[source,yaml,subs="+attributes"] ---- -[database] -pg_host='db.example.com' -pg_port=5432 -pg_database='awx' -pg_username='awx' -pg_password='redhat' +[all:vars] +# {ControllerNameStart} database variables + +# awx_install_pg_host=data.example.com +# awx_install_pg_port= +# awx_install_pg_database= +# awx_install_pg_username= +# awx_install_pg_password= # This is not required if you enable mTLS authentication to the database +# pg_sslmode=prefer # Set to verify-ca or verify-full to enable mTLS authentication to the database + + +# {EDAName} database variables + +# automationedacontroller_install_pg_host=data.example.com +# automationedacontroller_install_pg_port= +# automationedacontroller_install_pg_database= +# automationedacontroller_install_pg_username= +# automationedacontroller_install_pg_password= # This is not required if you enable mTLS authentication to the database +# automationedacontroller_pg_sslmode=prefer # Set to verify-full to enable mTLS authentication to the database + + +# {HubNameStart} database variables + +# automationhub_pg_host=data.example.com +# automationhub_pg_port= +# automationhub_pg_database= +# automationhub_pg_username= +# automationhub_pg_password= # This is not required if you enable mTLS authentication to the database +# automationhub_pg_sslmode=prefer # Set to verify-ca or verify-full to enable mTLS authentication to the database + + +# {GatewayStart} database variables + +# automationgateway_install_pg_host=data.example.com +# automationgateway_install_pg_port= +# automationgateway_install_pg_database= +# automationgateway_install_pg_username= +# automationgateway_install_pg_password= # This is not required if you enable mTLS authentication to the database +# automationgateway_pg_sslmode=prefer # Set to verify-ca or verify-full to enable mTLS authentication to the database ---- ++ +.With mTLS authentication to the database -. Run the installer. +Use the following inventory file snippet to configure each component's database with mTLS authentication. Uncomment the configuration you need. + -If you are using a PostgreSQL database with {ControllerName}, the database is owned by the connecting user and must have a `createDB` or administrator role assigned to it. -. Check that you are able to connect to the created database with the user, password and database name. -. Check the permission of the user, the user should have the `createDB` or administrator role. 
+[source,yaml,subs="+attributes"]
+----
+[all:vars]
+# {ControllerNameStart} database variables
+
+# awx_install_pg_host=data.example.com
+# awx_install_pg_port=
+# awx_install_pg_database=
+# awx_install_pg_username=
+# pg_sslmode=verify-full # This can be either verify-ca or verify-full
+# pgclient_sslcert=/path/to/cert # Path to the certificate file
+# pgclient_sslkey=/path/to/key # Path to the key file
+
+
+# {EDAName} database variables
+
+# automationedacontroller_install_pg_host=data.example.com
+# automationedacontroller_install_pg_port=
+# automationedacontroller_install_pg_database=
+# automationedacontroller_install_pg_username=
+# automationedacontroller_pg_sslmode=verify-full # EDA does not support verify-ca
+# automationedacontroller_pgclient_sslcert=/path/to/cert # Path to the certificate file
+# automationedacontroller_pgclient_sslkey=/path/to/key # Path to the key file
+
+
+# {HubNameStart} database variables
+
+# automationhub_pg_host=data.example.com
+# automationhub_pg_port=
+# automationhub_pg_database=
+# automationhub_pg_username=
+# automationhub_pg_sslmode=verify-full # This can be either verify-ca or verify-full
+# automationhub_pgclient_sslcert=/path/to/cert # Path to the certificate file
+# automationhub_pgclient_sslkey=/path/to/key # Path to the key file
+
+
+# {GatewayStart} database variables
+
+# automationgateway_install_pg_host=data.example.com
+# automationgateway_install_pg_port=
+# automationgateway_install_pg_database=
+# automationgateway_install_pg_username=
+# automationgateway_pg_sslmode=verify-full # This can be either verify-ca or verify-full
+# automationgateway_pgclient_sslcert=/path/to/cert # Path to the certificate file
+# automationgateway_pgclient_sslkey=/path/to/key # Path to the key file
+----
++
+. Run the installation program. If you are using a PostgreSQL database, the database is owned by the connecting user and must have a `createDB` or administrator role assigned to it.
+. Check that you can connect to the created database with the credentials provided in the inventory file.
+. Check the permission of the user. The user should have the `createDB` or administrator role.
 [NOTE]
 ====
 During this procedure, you must check the External Database coverage. For further information, see https://access.redhat.com/articles/4010491
 ====
-
diff --git a/downstream/modules/platform/proc-synchronizing-rpm-repositories-by-using-reposync.adoc b/downstream/modules/platform/proc-synchronizing-rpm-repositories-by-using-reposync.adoc index 561d2bb41d..6e09a22c99 100644 --- a/downstream/modules/platform/proc-synchronizing-rpm-repositories-by-using-reposync.adoc +++ b/downstream/modules/platform/proc-synchronizing-rpm-repositories-by-using-reposync.adoc @@ -7,14 +7,16 @@
 To perform a reposync you need a RHEL host that has access to the internet. After the repositories are synced, you can move the repositories to the disconnected network hosted from a web server.
+When downloading RPMs, ensure that you use the applicable distribution.
+
 .Procedure
 . Attach the BaseOS and AppStream required repositories:
+
----
 # subscription-manager repos \
-    --enable rhel-8-for-x86_64-baseos-rpms \
-    --enable rhel-8-for-x86_64-appstream-rpms
+    --enable rhel-9-for-x86_64-baseos-rpms \
+    --enable rhel-9-for-x86_64-appstream-rpms
----
 . Perform the reposync:
@@ -25,19 +27,13 @@ To perform a reposync you need a RHEL host that has access to the internet. Afte
    -p /path/to/download
----
-.. Use reposync with `--download-metadata` and without `--newest-only`.
See link://https://access.redhat.com/solutions/5186621[RHEL 8] Reposync.
-
-* If you are not using `--newest-only,` the repos downloaded will be ~90GB.
+... Use reposync with `--download-metadata` and without `--newest-only`. See link:https://access.redhat.com/solutions/5186621[RHEL 8 Reposync].
-* If you are using `--newest-only,` the repos downloaded will be ~14GB.
+* If you are not using `--newest-only`, the downloaded repositories are large (approximately 90 GB) and can take an extended amount of time to sync.
-. If you plan to use {RHSSO}, sync these repositories:
-
-.. jb-eap-7.3-for-rhel-8-x86_64-rpms
-.. rh-sso-7.4-for-rhel-8-x86_64-rpms
+* If you are using `--newest-only`, the download is smaller (approximately 14 GB) but can still take an extended amount of time to sync.
+
After the reposync is completed, your repositories are ready to use with a web server.
-
 . Move the repositories to your disconnected network.
diff --git a/downstream/modules/platform/proc-uninstalling-containerized-aap.adoc b/downstream/modules/platform/proc-uninstalling-containerized-aap.adoc index 973679b5c2..183bbabd51 100644 --- a/downstream/modules/platform/proc-uninstalling-containerized-aap.adoc +++ b/downstream/modules/platform/proc-uninstalling-containerized-aap.adoc @@ -5,30 +5,39 @@
 [role="_abstract"]
+When performing a reinstall following an uninstall that preserves the databases, you must use the previously generated {PlatformNameShort} secret key values to access the preserved databases.
-To uninstall a containerized deployment, execute the *uninstall.yml* playbook.
+Before performing an uninstall, collect the existing secret keys by running the following command:
+----
+$ podman secret inspect --showsecret <secret_key_variable> | jq -r .[].SecretData
+----
+For example:
+----
+$ podman secret inspect --showsecret controller_secret_key | jq -r .[].SecretData
+----
+
+For more information about the `*_secret_key` variables, see link:{URLContainerizedInstall}/appendix-inventory-files-vars[Inventory file variables].
+
+To uninstall a containerized deployment, run the `uninstall` playbook:
----
$ ansible-playbook -i inventory ansible.containerized_installer.uninstall
----
-This will stop all systemd units and containers and then delete all resources used by the containerized installer such as:
+This stops all systemd units and containers and then deletes all resources used by the containerized installer, such as:
-* config and data directories/files
+* configuration and data directories and files
 * systemd unit files
-* podman containers and images
+* Podman containers and images
 * RPM packages
-To keep container images, you can set the *container_keep_images* variable to true.
+To keep container images, set the `container_keep_images` parameter to `true`.
----
$ ansible-playbook -i inventory ansible.containerized_installer.uninstall -e container_keep_images=true
----
-To keep postgresql databases, you can set the *postgresql_keep_databases* variable to true.
+To keep PostgreSQL databases, set the `postgresql_keep_databases` parameter to `true`.
----
-$ ansible-playbook -i ansible.containerized_installer.uninstall -e postgresql_keep_databases=true
+$ ansible-playbook -i inventory ansible.containerized_installer.uninstall -e postgresql_keep_databases=true
----
-[NOTE]
-====
-You will have to use the same django secret key values rather than the auto-generated ones.
-====
\ No newline at end of file
+
diff --git a/downstream/modules/platform/proc-update-aap-container.adoc b/downstream/modules/platform/proc-update-aap-container.adoc new file mode 100644 index 0000000000..46489f8f53 --- /dev/null +++ b/downstream/modules/platform/proc-update-aap-container.adoc @@ -0,0 +1,54 @@
+[id="proc-update-aap-container"]
+
+= Updating container-based {PlatformNameShort}
+
+Perform a patch update for a {ContainerBase} of {PlatformNameShort} from 2.5 to 2.5.x.
+
+include::snippets/container-upgrades.adoc[]
+
+.Prerequisites
+
+You have done the following:
+
+* Reviewed the release notes for the associated patch release. For more information, see the link:{URLReleaseNotes}[{PlatformNameShort} {TitleReleaseNotes}].
+
+* Created a backup of your {PlatformNameShort} deployment. For more information, see xref:proc-backup-aap-container[Backing up container-based {PlatformNameShort}].
+
+.Procedure
+
+. Download the latest version of the containerized installer from the link:{PlatformDownloadUrl}[{PlatformNameShort} download].
+
+.. For online installations: *{PlatformNameShort} {PlatformVers} Containerized Setup*
+
+.. For offline or bundled installations: *{PlatformNameShort} {PlatformVers} Containerized Setup Bundle*
+
+. Copy the installation program `.tar` file onto your {RHEL} host.
+
+. Decide where you want the installation program to reside on the filesystem. Installation-related files are created under this location and require at least 10 GB for the initial installation.
+
+. Unpack the installation program `.tar` file into your installation directory, and go to the unpacked directory.
+
+.. To unpack the online installer:
++
+----
+$ tar xfvz ansible-automation-platform-containerized-setup-<version>.tar.gz
+----
++
+.. To unpack the offline or bundled installer:
++
+----
+$ tar xfvz ansible-automation-platform-containerized-setup-bundle-<version>-<arch>.tar.gz
+----
++
+. Edit the `inventory` file to match your required configuration. You can keep the same parameters from your existing {PlatformNameShort} deployment or you can change the parameters to match any modifications to your environment.
+
+. Run the `install` playbook:
++
+----
+$ ansible-playbook -i inventory ansible.containerized_installer.install
+----
++
+* If your privilege escalation requires a password to be entered, append `-K` to the command. You will then be prompted for the `BECOME` password.
+* You can use increasing verbosity, up to four v's (`-vvvv`), to see the details of the installation process. However, this can significantly increase installation time, so use it only as needed or as requested by Red Hat support.
+
+The update begins.
\ No newline at end of file
diff --git a/downstream/modules/platform/proc-update-aap-on-ocp.adoc b/downstream/modules/platform/proc-update-aap-on-ocp.adoc new file mode 100644 index 0000000000..52b1c9b072 --- /dev/null +++ b/downstream/modules/platform/proc-update-aap-on-ocp.adoc @@ -0,0 +1,15 @@
+[id="update-aap-on-ocp"]
+= Patch updating {PlatformNameShort} on {OCPShort}
+
+When you perform a patch update for an installation of {PlatformNameShort} on {OCPShort}, most updates happen within a channel:
+
+. A new update becomes available in the marketplace (through the redhat-operator CatalogSource).
+
+. A new InstallPlan is automatically created for your {PlatformNameShort} subscription. If the subscription is set to Manual, the InstallPlan must be manually approved in the OpenShift UI.
If the subscription is set to Automatic, it upgrades as soon as the new version is available.
++
+[NOTE]
+====
+It is recommended that you set a manual install strategy on your {OperatorPlatformNameShort} subscription (set when installing or upgrading the Operator). You are then prompted to approve an upgrade when it becomes available in your selected update channel. Stable channels for each X.Y release (for example, stable-2.5) are available.
+====
++
+. A new Subscription, CSV, and Operator containers are created alongside the old ones. If the new installation is successful, the old resources are then cleaned up.
diff --git a/downstream/modules/platform/proc-update-rhsso-client.adoc b/downstream/modules/platform/proc-update-rhsso-client.adoc index 7625965f9b..22dbe193c4 100644 --- a/downstream/modules/platform/proc-update-rhsso-client.adoc +++ b/downstream/modules/platform/proc-update-rhsso-client.adoc @@ -2,12 +2,13 @@
 = Updating the {RHSSO} client
-When {HubName} is installed and you know the URL of the instance, you must update the {RHSSO} to set the Valid Redirect URIs and Web Origins settings.
+After you install {HubName} and you know the URL of the instance, you must update the {RHSSO} to set the Valid Redirect URIs and Web Origins settings.
 .Procedure
+. Log in to {OCP}.
 . Navigate to menu:Operator[Installed Operators].
-. Select the RH-SSO project.
+. Select the *RH-SSO* project.
 . Click btn:[Red Hat Single Sign-On Operator].
 . Select btn:[Keycloak Client].
 . Click the *automation-hub-client-secret* client.
diff --git a/downstream/modules/platform/proc-upgrade-controller-hub-eda-unified-ui.adoc b/downstream/modules/platform/proc-upgrade-controller-hub-eda-unified-ui.adoc new file mode 100644 index 0000000000..6a86453f3b --- /dev/null +++ b/downstream/modules/platform/proc-upgrade-controller-hub-eda-unified-ui.adoc @@ -0,0 +1,120 @@
+:_newdoc-version: 2.18.3
+:_template-generated: 2024-10-09
+:_mod-docs-content-type: PROCEDURE
+
+[id="upgrade-controller-hub-eda-unified-ui_{context}"]
+= Automation controller and automation hub 2.4 and Event-Driven Ansible 2.5 with unified UI upgrades
+
+{PlatformNameShort} 2.5 supports upgrades from {PlatformNameShort} 2.4 environments for all components, with the exception of {EDAName}. You can also configure a mixed environment with {EDAName} from 2.5 connected to a legacy 2.4 cluster. Combining install methods (OCP, RPM, Containerized) within such a topology is not supported by {PlatformNameShort}.
+
+[NOTE]
+If you are running the 2.4 version of {EDAName} in production, before you upgrade, contact Red Hat support or your account representative for more information on how to move to {PlatformNameShort} 2.5.
+
+Supported topologies described in this document assume that:
+
+* 2.4 services will only include {ControllerName} and {HubName}.
+* 2.5 services will always include {EDAName} and the unified UI ({Gateway}).
+* Combining install methods for these topologies is not supported.
+
+== Upgrade considerations
+
+* You must maintain two separate inventory files: one for the 2.4 services and one for the 2.5 services.
+* You must maintain two separate installations within this scenario: one for the 2.4 services and one for the 2.5 services.
+* You must upgrade the two separate installations separately.
+* To upgrade to a consistent component version topology, consider the following:
+** You must manually combine the inventory file configuration from the 2.4 inventory into the 2.5 inventory and run the upgrade on only the 2.5 inventory file.
+** You must use an external database for both the 2.4 inventory and the 2.5 inventory.
+** Customers using managed database instances for either the 2.4 or the 2.5 inventory must first migrate to an external database before upgrading.
+
+
+.Prerequisites
+
+* An inventory from 2.4 for {ControllerName} and {HubName} and a 2.5 inventory for the unified UI ({Gateway}) and {EDAName}. You must run upgrades on the 2.4 services (using the inventory file to specify only {ControllerName} and {HubName} VMs) to get them to the initial version of {PlatformNameShort} 2.5 first. When all the services are at the same version, run an upgrade (using a complete inventory file) on all the services to go to the latest version of {PlatformNameShort} 2.5.
+
+[IMPORTANT]
+====
+DO NOT upgrade {EDAName} and the unified UI ({Gateway}) to the latest version of {PlatformNameShort} 2.5 without first upgrading the individual services ({ControllerName} and {HubName}) to the initial version of {PlatformNameShort} 2.5.
+====
+
+.Procedure
+
+=== Migration path for 2.4 instances with managed databases
+
+*Standalone node managed database*
+
+Convert the database node to an external one, removing it from the inventory. The PostgreSQL node continues working and does not lose the {PlatformNameShort}-provided setup, but you are responsible for managing its configuration afterward.
+
+*Collocated managed database*
+
+. Back up the database.
+. Restore the backup to a standalone managed database node instead of a collocated one.
+. Convert the standalone managed database node to an unmanaged standalone database.
+
+=== Migration path for 2.4 services with 2.5 services
+
+If you installed {PlatformNameShort} 2.5 to use {EDAName} in a supported scenario, you can upgrade your {PlatformNameShort} 2.4 {ControllerName} and {HubName} to {PlatformNameShort} 2.5 by following these steps:
+
+* Merge 2.4 inventory data into the 2.5 inventory. The following example shows the inventory file for {ControllerName} and {HubName} for 2.4 and the inventory file for {EDAName} and the unified UI ({Gateway}) for 2.5, respectively, as the starting point, and what the merged inventory looks like.
+
+*Inventory files from 2.4*
+
+[source,bash]
+----
+[automationcontroller]
+controller-1
+controller-2
+
+[automationhub]
+hub-1
+hub-2
+
+[all:vars]
+# Here we have the admin passwd, db credentials, etc.
+----
+
+*Inventory files from 2.5*
+[source,bash]
+----
+[edacontroller]
+eda-1
+eda-2
+
+[gateway]
+gw-1
+gw-2
+
+[all:vars]
+# Here we have admin passwd, db credentials etc.
+----
+
+*Merged Inventory*
+[source,bash]
+----
+[automationcontroller]
+controller-1
+controller-2
+
+[automationhub]
+hub-1
+hub-2
+
+[edacontroller]
+eda-1
+eda-2
+
+[gateway]
+gw-1
+gw-2
+
+[all:vars]
+# Here we have admin passwd, db credentials etc from both inventories above
+----
+
+* Run `setup.sh`.
+The installer upgrades {ControllerName} and {HubName} from 2.4 to {PlatformNameShort} 2.5.latest, {EDAName} and the unified UI ({Gateway}) from the fresh install of 2.5 to the latest version of 2.5, and connects {ControllerName} and {HubName} properly with the unified UI ({Gateway}) node to initialize the unified experience.
+
+.Verification
+
+* Verify that everything has upgraded to 2.5 and is working properly in one of two ways:
+** Use SSH to connect to {ControllerName} and {EDAName} and check the installed RPM versions.
+** In the unified UI, navigate to *Help > About* to verify that the RPM versions are at 2.5.
diff --git a/downstream/modules/platform/proc-upgrading-between-minor-aap-releases.adoc b/downstream/modules/platform/proc-upgrading-between-minor-aap-releases.adoc index 4abc957029..e25d701841 100644 --- a/downstream/modules/platform/proc-upgrading-between-minor-aap-releases.adoc +++ b/downstream/modules/platform/proc-upgrading-between-minor-aap-releases.adoc @@ -9,7 +9,7 @@
 [role="_abstract"]
-To upgrade between minor releases of {PlatformNameShort} 2, use this general workflow.
+To upgrade between minor releases of {PlatformNameShort} 2 on your {VMBase}, use this general workflow.
 .Procedure
diff --git a/downstream/modules/platform/proc-using-postinstall.adoc b/downstream/modules/platform/proc-using-postinstall.adoc index 8852c919e8..245094015a 100644 --- a/downstream/modules/platform/proc-using-postinstall.adoc +++ b/downstream/modules/platform/proc-using-postinstall.adoc @@ -2,55 +2,58 @@
 [id="using-postinstall_{context}"]
-= Using postinstall feature of containerized {PlatformNameShort}
+= Using the postinstall feature of containerized {PlatformNameShort}
 [role="_abstract"]
+You can use the optional postinstall feature of containerized {PlatformNameShort} to define and load the configuration during the initial installation. This uses a configuration-as-code approach, where you define your configuration to be loaded as YAML files.
-Use the experimental postinstaller feature of containerized {PlatformNameShort} to define and load the configuration during the initial installation. This uses a configuration-as-code approach, where you simply define your configuration to be loaded as simple YAML files.
+.Prerequisites
+* An {PlatformNameShort} license for this feature, stored on the local filesystem so that it can be loaded automatically from the inventory file.
-. To use this optional feature, you need to uncomment the following vars in the inventory file:
+
+.Procedure
+. The postinstall feature is disabled by default. To enable the postinstall feature, add the following variable to your inventory file:
+
----
controller_postinstall=true
----
+
-. The default is false, so you need to enable this to activate the postinstaller. You need a {PlatformNameShort} license for this feature that must reside on the local filesystem so it can be automatically loaded:
+. To load your {ControllerName} license as part of the postinstall process, set the following variables in your inventory file:
+
----
-controller_license_file=/full_path_to/manifest_file.zip
+controller_license_file=<path_to_manifest_zip_file>
+controller_postinstall_dir=<path_to_postinstall_directory>
----
+
 . You can pull your configuration-as-code from a Git-based repository. To do this, set the following variables to dictate where you pull the content from and where to store it for upload to the {PlatformNameShort} controller:
+
----
-controller_postinstall_repo_url=https://your_cac_scm_repo
-controller_postinstall_dir=/full_path_to_where_you_want_the pulled_content_to_reside
+controller_postinstall_repo_url=<repository_url>
+controller_postinstall_dir=<path_to_pulled_content>
+controller_postinstall_repo_ref=main
----
+
-. The controller_postinstall_repo_url variable can be used to define the postinstall repository URL which must include authentication information.
+. The `controller_postinstall_repo_url` variable defines the postinstall repository URL, which must include authentication information.
+
----
-http(s)://<host>/<repo>.git (public repository without http(s) authentication)
-http(s)://<user>:<password>@<host>:<repo>.git (private repository with http(s) authentication)
-git@<host>:<repo>.git (public/private repository with ssh authentication)
+http(s)://<host>/<repo>.git (public repository without HTTP(S) authentication)
+http(s)://<user>:<password>@<host>:<repo>.git (private repository with HTTP(S) authentication)
+git@<host>:<repo>.git (public or private repository with SSH authentication)
----
+
 [NOTE]
 ====
-When using ssh based authentication, the installer does not configure anything for you, so you must configure everything on the installer node.
+When using SSH-based authentication, the installer does not configure anything for you, so you must configure everything on the installer node.
 ====
-Definition files use the link:https://console.redhat.com/ansible/automation-hub/namespaces/infra/[infra certified collections]. The link:https://console.redhat.com/ansible/automation-hub/repo/validated/infra/controller_configuration/[controller_configuration] collection is preinstalled as part of the installation and uses the installation controller credentials you supply in the inventory file for access into the {PlatformNameShort} controller. You simply need to give the YAML configuration files.
+Definition files that are used by {Builder} to create {ExecEnvNameSing} images use the link:https://console.redhat.com/ansible/automation-hub/namespaces/infra/[infra certified collections]. The link:https://console.redhat.com/ansible/automation-hub/repo/validated/infra/controller_configuration/[controller_configuration] collection is preinstalled as part of the installation and uses the installation controller credentials you supply in the inventory file for access into the {PlatformNameShort} controller. You only need to supply the YAML configuration files.
-You can setup {PlatformNameShort} configuration attributes such as credentials, LDAP settings, users and teams, organizations, projects, inventories and hosts, job and workflow templates.
+You can set up {PlatformNameShort} configuration attributes such as credentials, LDAP settings, users and teams, organizations, projects, inventories and hosts, and job and workflow templates.
-The following example shows a sample *your-config.yml* file defining and loading controller job templates. The example demonstrates a simple change to the preloaded demo example provided with an {PlatformNameShort} installation.
+The following example shows a sample `your-config.yml` file defining and loading controller job templates. The example demonstrates a simple change to the example provided with an {PlatformNameShort} installation.
----
/full_path_to_your_configuration_as_code/
diff --git a/downstream/modules/platform/proc-verify-aap-installation.adoc b/downstream/modules/platform/proc-verify-aap-installation.adoc new file mode 100644 index 0000000000..265368f495 --- /dev/null +++ b/downstream/modules/platform/proc-verify-aap-installation.adoc @@ -0,0 +1,14 @@
+[id="proc-verify-aap-installation_{context}"]
+
+= Verifying installation of {PlatformNameShort}
+
+[role="_abstract"]
+Upon a successful login, your installation of {PlatformName} is complete.
+
+[IMPORTANT]
+====
+If the installation fails and you are a customer who has purchased a valid license for {PlatformName}, contact Ansible through the link:https://docs.redhat.com/[Red Hat Customer Portal].
+====
+
+.Additional resources
+See link:{LinkGettingStarted} for post-installation instructions.
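+
+As an optional extra check, you can confirm from the command line that the platform API responds. The following sketch uses the {ControllerName} ping endpoint as exposed through the {Gateway}; the exact path can vary by deployment, so adjust the hostname and path for your environment:
+
+----
+$ curl -k https://<hostname>/api/controller/v2/ping/
+----
+
+A successful request returns an HTTP 200 response with basic version and instance information.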
diff --git a/downstream/modules/platform/proc-verify-network-connectivity.adoc b/downstream/modules/platform/proc-verify-network-connectivity.adoc index 8aaca228cf..f965d8e303 100644 --- a/downstream/modules/platform/proc-verify-network-connectivity.adoc +++ b/downstream/modules/platform/proc-verify-network-connectivity.adoc @@ -11,7 +11,7 @@ Take note of the host and port information from your existing deployment. This i
 .Procedure
-. Create a yaml file to verify the connection between your new deployment and your old deployment database:
+. Create a YAML file to verify the connection between your new deployment and your old deployment database:
+
-----
apiVersion: v1
@@ -44,7 +44,7 @@ oc rsh dbchecker
 . After the shell session opens in the pod, verify that the new project can connect to your old project cluster:
+
-----
-pg_isready -h <hostname> -p <port> -U awx
+pg_isready -h <hostname> -p <port> -U AutomationController
-----
+
.Example
diff --git a/downstream/modules/platform/ref-OCP-system-requirements.adoc b/downstream/modules/platform/ref-OCP-system-requirements.adoc new file mode 100644 index 0000000000..4567c31bc7 --- /dev/null +++ b/downstream/modules/platform/ref-OCP-system-requirements.adoc @@ -0,0 +1,7 @@
+
+
+// [id="ref-OCP-system-requirements_{context}"]
+
+= System requirements for installing on {OCPShort}
+
+For system requirements for installing {PlatformNameShort} on {OCPShort}, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/tested_deployment_models/ocp-topologies#tested_system_configurations_6[Tested system configurations] section of _{TitleTopologies}_.
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-RPM-system-requirements.adoc b/downstream/modules/platform/ref-RPM-system-requirements.adoc new file mode 100644 index 0000000000..4e4cf04de4 --- /dev/null +++ b/downstream/modules/platform/ref-RPM-system-requirements.adoc @@ -0,0 +1,7 @@
+
+
+// [id="ref-RPM-system-requirements_{context}"]
+
+= System requirements for RPM installation
+
+For system requirements for the RPM installation method of {PlatformNameShort}, see the link:{URLInstallationGuide}/platform-system-requirements[System requirements] section of _{TitleInstallationGuide}_.
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-accessing-control-auto-hub-eda-control.adoc b/downstream/modules/platform/ref-accessing-control-auto-hub-eda-control.adoc index 19eb0578b8..1d49f70481 100644 --- a/downstream/modules/platform/ref-accessing-control-auto-hub-eda-control.adoc +++ b/downstream/modules/platform/ref-accessing-control-auto-hub-eda-control.adoc @@ -1,54 +1,66 @@
 :_mod-docs-content-type: REFERENCE
-[id="accessing-control-auto-hub-eda-control_{context}"]
+[id="accessing-ansible-automation-platform_{context}"]
-= Accessing {ControllerName}, {HubName}, and {EDAcontroller}
+= Accessing {PlatformNameShort}
 [role="_abstract"]
-After the installation completes, these are the default protocol and ports used:
+After the installation completes, the default protocol and ports used for {PlatformNameShort} are 80 (HTTP) and 443 (HTTPS).
-* http/https protocol
+You can customize the ports with the following variables:
-* Ports 8080/8443 for {ControllerName}
+----
+envoy_http_port=80
+envoy_https_port=443
+----
-* Ports 8081/8444 for {HubName}
+If you want to disable HTTPS, set `envoy_disable_https` to `true`:
-* Ports 8082/8445 for {EDAcontroller}
+----
+envoy_disable_https=true
+----
.Accessing the platform UI
-These can be changed. Consult the *README.md* for further details.
It is recommended that you leave the defaults unless you need to change them due to port conflicts or other factors.
+The platform UI is available by default at:
+----
+https://<hostname>:443
+----
-.Accessing {ControllerName} UI
+Log in as the admin user with the password you created for `gateway_admin_password`.
-The {ControllerName} UI is available by default at:
// Michelle: Removing additional component UI references as platform gateway UI will be used going forward - AAP-18760
+// .Accessing {ControllerName} UI
----
-https://<hostname>:8443
----
+// The {ControllerName} UI is available by default at:
-Log in as the admin user with the password you created for *controller_admin_password*.
+// ----
+// https://<hostname>:8443
+// ----
-If you supplied the license manifest as part of the installation, the {PlatformNameShort} dashboard is displayed. If you did not supply a license file, the *Subscription* screen is displayed where you must supply your license details. This is documented here: link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_operations_guide/assembly-aap-activate[Chapter 1. Activating Red Hat Ansible Automation Platform].
+// Log in as the admin user with the password you created for *controller_admin_password*.
-.Accessing {HubName} UI
+// If you supplied the license manifest as part of the installation, the {PlatformNameShort} dashboard is displayed. If you did not supply a license file, the *Subscription* screen is displayed where you must supply your license details. This is documented here: link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_operations_guide/assembly-aap-activate[Chapter 1. Activating {PlatformName}].
-The {HubName} UI is available by default at:
+// .Accessing {HubName} UI
----
-https://<hostname>:8444
----
+// The {HubName} UI is available by default at:
-Log in as the admin user with the password you created for *hub_admin_password*.
+// ----
+// https://<hostname>:8444
+// ----
+// Log in as the admin user with the password you created for *hub_admin_password*.
-.Accessing {EDAName} UI
-The {EDAName} UI is available by default at:
----
-https://<hostname>:8445
----
+// .Accessing {EDAName} UI
+
+// The {EDAName} UI is available by default at:
+// ----
+// https://<hostname>:8445
+// ----
-Log in as the admin user with the password you created for *eda_admin_password*.
+// Log in as the admin user with the password you created for *eda_admin_password*.
diff --git a/downstream/modules/platform/ref-adding-execution-nodes.adoc b/downstream/modules/platform/ref-adding-execution-nodes.adoc index bbd6a7890a..1778f43158 100644 --- a/downstream/modules/platform/ref-adding-execution-nodes.adoc +++ b/downstream/modules/platform/ref-adding-execution-nodes.adoc @@ -8,23 +8,25 @@
 [role="_abstract"]
-The containerized installer can deploy remote execution nodes. This is handled by the execution_nodes group in the ansible inventory file.
+The containerized installer can deploy remote execution nodes. The `execution_nodes` group in the inventory file handles this.
----
[execution_nodes]
-fqdn_of_your_execution_host
+<fqdn_of_your_execution_host>
----
An execution node is by default configured as an execution type running on port 27199 (TCP). This can be changed by the following variables:
-* receptor_port=27199
-* receptor_protocol=tcp
-* receptor_type=hop
+----
+receptor_port=27199
+receptor_protocol=tcp
+receptor_type=hop
+----
-Receptor type value can be either execution or hop, while the protocol is either TCP or UDP.
By default, the nodes in the `execution_nodes` group will be added as peers for the controller node. However, you can change the peers configuration by using the `receptor_peers` variable.
+The `receptor_type` value can be either `execution` or `hop`, while the `receptor_protocol` is either `tcp` or `udp`. By default, the nodes in the `execution_nodes` group are added as peers for the controller node. However, you can change the peers configuration by using the `receptor_peers` variable.
----
[execution_nodes]
fqdn_of_your_execution_host
-fqdn_of_your_hop_host receptor_type=hop receptor_peers=’[“fqdn_of_your_execution_host”]’
+fqdn_of_your_hop_host receptor_type=hop receptor_peers='["<fqdn_of_your_execution_host>"]'
----
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-ansible-inventory-variables.adoc b/downstream/modules/platform/ref-ansible-inventory-variables.adoc index 9155e8009d..30c98b95c4 100644 --- a/downstream/modules/platform/ref-ansible-inventory-variables.adoc +++ b/downstream/modules/platform/ref-ansible-inventory-variables.adoc @@ -11,46 +11,41 @@ For a list of global configuration options, see link:https://docs.ansible.com/an
 [cols="50%,50%",options="header"]
 |====
 | *Variable* | *Description*
-| *`ansible_connection`* | The connection plugin used for the task on the target host.
+| `ansible_connection` | The connection plugin used for the task on the target host.
-This can be the name of any of Ansible connection plugin.
-SSH protocol types are `smart`, `ssh` or `paramiko`.
+This can be the name of any of Ansible connection plugins.
+SSH protocol types are `smart`, `ssh`, or `paramiko`.
 Default = `smart`
-| *`ansible_host`* | The ip or name of the target host to use instead of *`inventory_hostname`*.
-| *`ansible_port`* | The connection port number.
+| `ansible_host` | The IP address or name of the target host to use instead of `inventory_hostname`.
+| `ansible_password` | The password to authenticate to the host.
-Default: 22 for ssh
-| *`ansible_user`* | The user name to use when connecting to the host.
-| *`ansible_password`* | The password to authenticate to the host.
+Do not store this variable in plain text. Always use a vault. For more information, see link:https://docs.ansible.com/ansible-core/devel/tips_tricks/ansible_tips_tricks.html#keep-vaulted-variables-safely-visible[Keep vaulted variables safely visible].
+| `ansible_port` | The connection port number.
-Never store this variable in plain text.
+The default for SSH is `22`.
+| `ansible_scp_extra_args` | This setting is always appended to the default `scp` command line.
+| `ansible_sftp_extra_args` | This setting is always appended to the default `sftp` command line.
+| `ansible_shell_executable` | This sets the shell that the Ansible controller uses on the target machine and overrides the executable in `ansible.cfg` which defaults to `/bin/sh`. Do not change this variable unless `/bin/sh` is not installed on the target machine or cannot be run from sudo.
+| `ansible_shell_type` | The shell type of the target system.
-Always use a vault.
-| *`ansible_ssh_private_key_file`* | Private key file used by SSH.
-Useful if using multiple keys and you do not want to use an SSH agent.
-| *`ansible_ssh_common_args`* | This setting is always appended to the default command line for `sftp`, `scp`, and `ssh`.
-Useful to configure a ProxyCommand for a certain host or group.
-| *`ansible_sftp_extra_args`* | This setting is always appended to the default `sftp` command line.
-| *`ansible_scp_extra_args`* | This setting is always appended to the default `scp` command line.
-| *`ansible_ssh_extra_args`* | This setting is always appended to the default `ssh` command line.
-| *`ansible_ssh_pipelining`* | Determines if SSH pipelining is used.
-This can override the pipelining setting in `ansible.cfg`.
+Do not use this setting unless you have set the `ansible_shell_executable` to a non-Bourne (sh) compatible shell.
+By default commands are formatted using sh-style syntax. Setting this to `csh` or `fish` causes commands executed on target systems to follow the syntax of those shells instead.
+| `ansible_ssh_common_args` | This setting is always appended to the default command line for `sftp`, `scp`, and `ssh`.
+Useful to configure a `ProxyCommand` for a certain host or group.
+| `ansible_ssh_executable` | This setting overrides the default behavior to use the system `ssh`.
+This can override the `ssh_executable` setting in `ansible.cfg`.
+| `ansible_ssh_extra_args` | This setting is always appended to the default `ssh` command line.
+| `ansible_ssh_pipelining` | Determines if SSH `pipelining` is used.
+
+This can override the `pipelining` setting in `ansible.cfg`. If using SSH key-based authentication, the key must be managed by an SSH agent.
-| *`ansible_ssh_executable`* | Added in version 2.2.
+| `ansible_ssh_private_key_file` | Private key file used by SSH.
-This setting overrides the default behavior to use the system SSH.
-This can override the ssh_executable setting in `ansible.cfg`.
-| *`ansible_shell_type`* | The shell type of the target system.
-Do not use this setting unless you have set the `ansible_shell_executable` to a non-Bourne (sh) compatible shell.
-By default commands are formatted using sh-style syntax.
-Setting this to `csh` or `fish` causes commands executed on target systems to follow the syntax of those shells instead.
-| *`ansible_shell_executable`* | This sets the shell that the Ansible controller uses on the target machine, and overrides the executable in `ansible.cfg` which defaults to `/bin/sh`.
+Useful if using multiple keys and you do not want to use an SSH agent.
+| `ansible_user` | The user name to use when connecting to the host.
-Do not change this variable unless `/bin/sh` is not installed on the target machine or cannot be run from sudo.
-| *`inventory_hostname`* | This variable takes the hostname of the machine from the inventory script or the Ansible configuration file.
-
-You cannot set the value of this variable.
-
-Because the value is taken from the configuration file, the actual runtime hostname value can vary from what is returned by this variable.
+| `inventory_hostname` | This variable takes the hostname of the machine from the inventory script or the Ansible configuration file.
+You cannot set the value of this variable. Because the value is taken from the configuration file, the actual runtime hostname value can vary from what is returned by this variable.
|====
diff --git a/downstream/modules/platform/ref-automation-hub-requirements.adoc b/downstream/modules/platform/ref-automation-hub-requirements.adoc index 5c112b7ee9..dcae1e995a 100644 --- a/downstream/modules/platform/ref-automation-hub-requirements.adoc +++ b/downstream/modules/platform/ref-automation-hub-requirements.adoc @@ -2,25 +2,8 @@
 = {HubNameStart} system requirements
-{HubNameStart} enables you to discover and use new certified automation content from Red Hat Ansible and Certified Partners.
On {HubNameMain}, you can discover and manage Ansible Collections, which are supported automation content developed by Red Hat and its partners for use cases such as cloud automation, network automation, and security automation. +{HubNameStart} allows you to discover and use new certified automation content from Red Hat Ansible and Certified Partners. On {HubNameMain}, you can discover and manage Ansible Collections, which are supported automation content developed by Red Hat and its partners for use cases such as cloud automation, network automation, and security automation. -{HubNameStart} has the following system requirements: - -[cols="a,a,a"] -|=== -h|Requirement | Required | Notes - -| *RAM* | 8 GB minimum | - -* 8 GB RAM (minimum and recommended for Vagrant trial installations) -* 8 GB RAM (minimum for external standalone PostgreSQL databases) -* For capacity based on forks in your configuration, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-jobs#controller-capacity-determination[{ControllerNameStart} capacity determination and job impact]. -| *CPUs* | 2 minimum | - -For capacity based on forks in your configuration, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-jobs#controller-capacity-determination[{ControllerNameStart} capacity determination and job impact]. -| *Local disk* | 60 GB disk | Dedicate a minimum of 40GB to `/var` for collection storage. - -|=== [NOTE] ==== @@ -33,5 +16,5 @@ To avoid this, use the `automationhub_main_url` inventory variable with a value This adds the external address to `/etc/pulp/settings.py`. This implies that you only want to use the external address. -For information about inventory file variables, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/appendix-inventory-files-vars[Inventory file variables] in the _{PlatformName} Installation Guide_. +For information about inventory file variables, see xref:appendix-inventory-files-vars[Inventory file variables]. ==== diff --git a/downstream/modules/platform/ref-azure-key-vault-lookup.adoc b/downstream/modules/platform/ref-azure-key-vault-lookup.adoc index d361803a04..1c8ca8a773 100644 --- a/downstream/modules/platform/ref-azure-key-vault-lookup.adoc +++ b/downstream/modules/platform/ref-azure-key-vault-lookup.adoc @@ -5,9 +5,9 @@ When you select *{Azure} Key Vault* for *Credential Type*, give the following metadata to configure your lookup: * *Vault URL (DNS Name)* (required): give the URL used for communicating with {Azure}'s key management system -* *Client ID* (required): give the identifier as obtained by the {Azure} Active Directory -* *Client Secret* (required): give the secret as obtained by the {Azure} Active Directory -* *Tenant ID* (required): give the unique identifier that is associated with an {Azure} Active Directory instance within an Azure subscription +* *Client ID* (required): give the identifier as obtained by {MSEntraID} +* *Client Secret* (required): give the secret as obtained by {MSEntraID} +* *Tenant ID* (required): give the unique identifier that is associated with an {MSEntraID} instance within an Azure subscription * *Cloud Environment*: select the applicable cloud environment to apply //The following is an example of a configured {Azure} KMS credential. 
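+
+As an illustration only, a configured lookup might use values like the following. All values shown are placeholders for your own tenant and vault, not defaults:
+
+----
+Vault URL (DNS Name): https://example-vault.vault.azure.net
+Client ID: <client_id_from_your_app_registration>
+Client Secret: <client_secret>
+Tenant ID: <tenant_id_of_your_directory_instance>
+Cloud Environment: <applicable_cloud_environment>
+----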
diff --git a/downstream/modules/platform/ref-configuring-inventory-file.adoc b/downstream/modules/platform/ref-configuring-inventory-file.adoc new file mode 100644 index 0000000000..05b88a72e1 --- /dev/null +++ b/downstream/modules/platform/ref-configuring-inventory-file.adoc @@ -0,0 +1,135 @@
+[id="configuring-inventory-file"]
+= Configuring the inventory file
+
+You can control the installation of {PlatformNameShort} with inventory files. Inventory files define the hosts and containers used and created, variables for components, and other information needed to customize the installation.
+
+This document provides example inventory files that you can copy and change to get started quickly.
+
+Inventory files for the growth and enterprise topologies are also found in the downloaded installer package:
+
+* The default one, named `inventory`, is for the enterprise topology pattern.
+
+* If you want to deploy the growth or all-in-one pattern, you need to copy over or use the `inventory-growth` file instead.
+
+Additionally, you can find example inventory files in link:{URLTopologies}/container-topologies[Container topologies] in _{TitleTopologies}_.
+
+To use the example inventory files, replace the `< >` placeholders with your specific variables, and update the host names. Refer to the `README.md` file in the installation directory for more information about optional and required variables.
+
+== Inventory file for online installation for containerized growth topology (all-in-one)
+
+Use the example inventory file to perform an online installation for the containerized growth topology (all-in-one):
+
+include::snippets/inventory-cont-a-env-a.adoc[]
+
+== Inventory file for online installation for containerized enterprise topology
+
+Use the example inventory file to perform an online installation for the containerized enterprise topology:
+
+include::snippets/inventory-cont-b-env-a.adoc[]
+
+.Redis configuration for an enterprise topology
+include::snippets/redis-colocation-containerized.adoc[]
+* By default, the `redis_mode` is set to `cluster`.
+** `redis_mode=cluster`
+
+* For more information about Redis, see link:{URLPlanningGuide}/ha-redis_planning[Caching and queueing system] in _{TitlePlanningGuide}_.
+
+== Additional information for configuring your inventory file
+
+For more information about the variables you can use to configure your inventory file, see link:{URLContainerizedInstall}/appendix-inventory-files-vars[Inventory file variables].
+
+.Offline or bundled installation
+
+* To perform an offline installation, add the following under the `[all:vars]` group:
+
+----
+bundle_install=true
+# The bundle directory must include /bundle in the path
+bundle_dir=<path_to_the_bundle_directory>
+----
+
+.Configuring an HAProxy load balancer
+
+* To configure an HAProxy load balancer in front of {Gateway} with a custom CA cert, set the following inventory file variables under the `[all:vars]` group:
+
+----
+custom_ca_cert=<path_to_custom_ca_cert>
+gateway_main_url=<load_balancer_url>
+----
+
+[NOTE]
+====
+HAProxy SSL passthrough mode is not supported with {Gateway}.
+====
+
+.Configuring Network File System (NFS) storage for {HubName}
+
+NFS is a type of shared storage that is supported in containerized installations. Shared storage is required when installing more than one instance of {HubName} with a `file` storage backend. When installing a single instance of {HubName}, shared storage is optional.
+
+* To configure shared storage for {HubName}, set the following variable in the inventory file, ensuring your NFS share has read, write, and execute permissions:
+
+----
+hub_shared_data_path=<path_to_nfs_share>
+----
+
+* To change the mount options for your NFS share, use the `hub_shared_data_mount_opts` variable. This variable is optional and the default value is `rw,sync,hard`.
+
+.Configuring Amazon S3 storage for {HubName}
+
+Amazon S3 storage is a type of object storage that is supported in containerized installations. When using an AWS S3 storage backend, set `hub_storage_backend` to `s3`. The AWS S3 bucket must exist before running the installation program.
+
+The variables you can use to configure this storage backend type in your inventory file are:
+
+* `hub_s3_access_key`
+* `hub_s3_secret_key`
+* `hub_s3_bucket_name`
+* `hub_s3_extra_settings`
+
+Extra parameters can be passed through an Ansible `hub_s3_extra_settings` dictionary.
+
+For example, you can set the following parameters:
+
+----
+hub_s3_extra_settings:
+  AWS_S3_MAX_MEMORY_SIZE: 4096
+  AWS_S3_REGION_NAME: eu-central-1
+  AWS_S3_USE_SSL: True
+----
+
+For more information about the list of parameters, see link:https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings[django-storages documentation - Amazon S3].
+
+.Configuring Azure Blob Storage for {HubName}
+
+Azure Blob Storage is a type of object storage that is supported in containerized installations.
+When using an Azure Blob Storage backend, set `hub_storage_backend` to `azure`. The Azure container must exist before running the installation program.
+
+The variables you can use to configure this storage backend type in your inventory file are:
+
+* `hub_azure_account_key`
+* `hub_azure_account_name`
+* `hub_azure_container`
+* `hub_azure_extra_settings`
+
+Extra parameters can be passed through an Ansible `hub_azure_extra_settings` dictionary.
+
+For example, you can set the following parameters:
+
+----
+hub_azure_extra_settings:
+  AZURE_LOCATION: foo
+  AZURE_SSL: True
+  AZURE_URL_EXPIRATION_SECS: 60
+----
+
+For more information about the list of parameters, see link:https://django-storages.readthedocs.io/en/latest/backends/azure.html#settings[django-storages documentation - Azure Storage].
+
+
+.Loading an {ControllerName} license file
+
+* To define the location of your {ControllerName} license file, set the following variable in the inventory file:
+
+----
+controller_license_file=<path_to_license_file>
+----
+
+//* To define the license file as part of the postinstall process instead, see xref:using-postinstall_{context}[Using the postinstall feature of containerized {PlatformNameShort}].
diff --git a/downstream/modules/platform/ref-containerized-system-requirements.adoc b/downstream/modules/platform/ref-containerized-system-requirements.adoc new file mode 100644 index 0000000000..a187c71e23 --- /dev/null +++ b/downstream/modules/platform/ref-containerized-system-requirements.adoc @@ -0,0 +1,7 @@
+
+
+// [id="ref-containerized-system-requirements_{context}"]
+
+= System requirements for containerized installation
+
+For system requirements for the containerized installation method of {PlatformNameShort}, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/containerized_installation/aap-containerized-installation#system_requirements[System requirements] section of _{TitleContainerizedInstall}_.
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-containerized-troubleshoot-config.adoc b/downstream/modules/platform/ref-containerized-troubleshoot-config.adoc
new file mode 100644
index 0000000000..17732e9094
--- /dev/null
+++ b/downstream/modules/platform/ref-containerized-troubleshoot-config.adoc
@@ -0,0 +1,23 @@
+[id="ref-containerized-troubleshoot-config"]
+
+= Troubleshooting containerized {PlatformNameShort} configuration
+
+// New items have appeared within installation program directory once an install has been run. What are these items?
+
+.Sometimes the postinstall process for seeding {PlatformNameShort} content fails. This can manifest as output similar to the following:
+
+----
+TASK [infra.controller_configuration.projects : Configure Controller Projects | Wait for finish the projects creation] ***************************************
+Friday 29 September 2023 11:02:32 +0100 (0:00:00.443) 0:00:53.521 ******
+FAILED - RETRYING: [daap1.lan]: Configure Controller Projects | Wait for finish the projects creation (1 retries left).
+failed: [daap1.lan] (item={'failed': 0, 'started': 1, 'finished': 0, 'ansible_job_id': '536962174348.33944', 'results_file': '/home/aap/.ansible_async/536962174348.33944', 'changed': False, '__controller_project_item': {'name': 'AAP Config-As-Code Examples', 'organization': 'Default', 'scm_branch': 'main', 'scm_clean': 'no', 'scm_delete_on_update': 'no', 'scm_type': 'git', 'scm_update_on_launch': 'no', 'scm_url': 'https://github.com/user/repo.git'}, 'ansible_loop_var': '__controller_project_item'}) => {"__projects_job_async_results_item": {"__controller_project_item": {"name": "AAP Config-As-Code Examples", "organization": "Default", "scm_branch": "main", "scm_clean": "no", "scm_delete_on_update": "no", "scm_type": "git", "scm_update_on_launch": "no", "scm_url": "https://github.com/user/repo.git"}, "ansible_job_id": "536962174348.33944", "ansible_loop_var": "__controller_project_item", "changed": false, "failed": 0, "finished": 0, "results_file": "/home/aap/.ansible_async/536962174348.33944", "started": 1}, "ansible_job_id": "536962174348.33944", "ansible_loop_var": "__projects_job_async_results_item", "attempts": 30, "changed": false, "finished": 0, "results_file": "/home/aap/.ansible_async/536962174348.33944", "started": 1, "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []}
+----
+
+The `infra.controller_configuration.dispatch` role uses an asynchronous loop with 30 retries to apply each configuration type, and the default delay between retries is 1 second. If the configuration is large, this might not be enough time to apply everything before the last retry occurs.
+
+Increase the retry delay by setting the `controller_configuration_async_delay` variable to something other than 1 second. For example, setting it to 2 seconds doubles the retry time. Set this variable in the repository where the controller configuration is defined, or add it to the `[all:vars]` section of the installation program inventory file.
+
+In some cases, no additional modification is required, and re-running the installation program resolves the issue.
+
+//I have updated a configuration file, how do I get changes to apply?
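+
+For reference, a minimal sketch of the inventory file change described above, assuming a delay of 2 seconds:
+
+----
+[all:vars]
+controller_configuration_async_delay=2
+----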
+//
diff --git a/downstream/modules/platform/ref-containerized-troubleshoot-diagnosing.adoc b/downstream/modules/platform/ref-containerized-troubleshoot-diagnosing.adoc
new file mode 100644
index 0000000000..b3e07a66ca
--- /dev/null
+++ b/downstream/modules/platform/ref-containerized-troubleshoot-diagnosing.adoc
@@ -0,0 +1,123 @@
+[id="ref-containerized-troubleshoot-diagnosing"]
+
+= Diagnosing the problem
+
+For general container-based troubleshooting, you can inspect the container logs for any running service to help troubleshoot underlying issues.
+
+.Identifying the running containers
+
+To get a list of the running container names, run the following command:
+
+----
+$ podman ps --all --format "{{.Names}}"
+----
+
+Example output:
+
+----
+postgresql
+redis-unix
+redis-tcp
+receptor
+automation-controller-rsyslog
+automation-controller-task
+automation-controller-web
+automation-eda-api
+automation-eda-daphne
+automation-eda-web
+automation-eda-worker-1
+automation-eda-worker-2
+automation-eda-activation-worker-1
+automation-eda-activation-worker-2
+automation-eda-scheduler
+automation-gateway-proxy
+automation-gateway
+automation-hub-api
+automation-hub-content
+automation-hub-web
+automation-hub-worker-1
+automation-hub-worker-2
+----
+
+.Inspecting the logs
+
+To inspect the logs of any running container, run the `journalctl` command:
+
+----
+$ journalctl CONTAINER_NAME=
+----
+
+Example command with output:
+
+----
+$ journalctl CONTAINER_NAME=automation-gateway-proxy
+
+Oct 08 01:40:12 aap.example.org automation-gateway-proxy[1919]: [2024-10-08 00:40:12.885][2][info][upstream] [external/envoy/source/common/upstream/cds_ap>
+Oct 08 01:40:12 aap.example.org automation-gateway-proxy[1919]: [2024-10-08 00:40:12.885][2][info][upstream] [external/envoy/source/common/upstream/cds_ap>
+Oct 08 01:40:19 aap.example.org automation-gateway-proxy[1919]: [2024-10-08T00:40:16.753Z] "GET /up HTTP/1.1" 200 - 0 1138 10 0 "192.0.2.1" "python->
+----
+
+To view the logs of a running container in real time, run the `podman logs -f` command:
+
+----
+$ podman logs -f 
+----
+
+.Controlling container operations
+
+You can control operations for a container by running the `systemctl` command:
+
+----
+$ systemctl --user status 
+----
+
+Example command with output:
+
+----
+$ systemctl --user status automation-gateway-proxy
+● automation-gateway-proxy.service - Podman automation-gateway-proxy.service
+     Loaded: loaded (/home/user/.config/systemd/user/automation-gateway-proxy.service; enabled; preset: disabled)
+     Active: active (running) since Mon 2024-10-07 12:39:23 BST; 23h ago
+       Docs: man:podman-generate-systemd(1)
+    Process: 780 ExecStart=/usr/bin/podman start automation-gateway-proxy (code=exited, status=0/SUCCESS)
+   Main PID: 1919 (conmon)
+      Tasks: 1 (limit: 48430)
+     Memory: 852.0K
+        CPU: 2.996s
+     CGroup: /user.slice/user-1000.slice/user@1000.service/app.slice/automation-gateway-proxy.service
+             └─1919 /usr/bin/conmon --api-version 1 -c 2dc3c7b2cecd73010bad1e0aaa806015065f92556ed3591c9d2084d7ee209c7a -u 2dc3c7b2cecd73010bad1e0aaa80>
+Oct 08 11:44:10 aap.example.org automation-gateway-proxy[1919]: [2024-10-08T10:44:02.926Z] "GET /api/galaxy/_ui/v1/settings/ HTTP/1.1" 200 - 0 654 58 47 ">
+Oct 08 11:44:10 aap.example.org automation-gateway-proxy[1919]: [2024-10-08T10:44:03.387Z] "GET /api/controller/v2/config/ HTTP/1.1" 200 - 0 4018 58 44 "1>
+Oct 08 11:44:10 aap.example.org automation-gateway-proxy[1919]: [2024-10-08T10:44:03.370Z] "GET /api/galaxy/v3/plugin/ansible/search/collection-versions/?>
+Oct 08 11:44:10 aap.example.org automation-gateway-proxy[1919]: [2024-10-08T10:44:03.405Z] "GET /api/controller/v2/organizations/?role_level=notification_>
+Oct 08 11:44:10 aap.example.org automation-gateway-proxy[1919]: [2024-10-08T10:44:04.366Z] "GET /api/galaxy/_ui/v1/me/ HTTP/1.1" 200 - 0 1368 79 40 "192.1>
+Oct 08 11:44:10 aap.example.org automation-gateway-proxy[1919]: [2024-10-08T10:44:04.360Z] "GET /api/controller/v2/workflow_approvals/?page_size=200&statu>
+Oct 08 11:44:10 aap.example.org automation-gateway-proxy[1919]: [2024-10-08T10:44:04.379Z] "GET /api/controller/v2/job_templates/7/ HTTP/1.1" 200 - 0 1356>
+Oct 08 11:44:10 aap.example.org automation-gateway-proxy[1919]: [2024-10-08T10:44:04.378Z] "GET /api/galaxy/_ui/v1/feature-flags/ HTTP/1.1" 200 - 0 207 81>
+Oct 08 11:44:13 aap.example.org automation-gateway-proxy[1919]: [2024-10-08 10:44:13.856][2][info][upstream] [external/envoy/source/common/upstream/cds_ap>
+Oct 08 11:44:13 aap.example.org automation-gateway-proxy[1919]: [2024-10-08 10:44:13.856][2][info][upstream] [external/envoy/source/common/upstream/cds_ap
+----
+
+.Getting container information about the execution plane
+
+To get container information about {ControllerName}, {EDAName}, and `execution_nodes` nodes, prefix any Podman commands with either:
+
+----
+CONTAINER_HOST=unix://run/user//podman/podman.sock
+----
+
+or
+
+----
+CONTAINERS_STORAGE_CONF=/aap/containers/storage.conf
+----
+
+Example with output:
+
+----
+$ CONTAINER_HOST=unix://run/user/1000/podman/podman.sock podman images
+
+REPOSITORY TAG IMAGE ID CREATED SIZE
+registry.redhat.io/ansible-automation-platform-25/ee-supported-rhel8 latest 59d1bc680a7c 6 days ago 2.24 GB
+registry.redhat.io/ansible-automation-platform-25/ee-minimal-rhel8 latest a64b9fc48094 6 days ago 338 MB
+----
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-containerized-troubleshoot-install.adoc b/downstream/modules/platform/ref-containerized-troubleshoot-install.adoc
new file mode 100644
index 0000000000..ab0750c542
--- /dev/null
+++ b/downstream/modules/platform/ref-containerized-troubleshoot-install.adoc
@@ -0,0 +1,155 @@
+[id="ref-containerized-troubleshoot-install"]
+
+= Troubleshooting containerized {PlatformNameShort} installation
+
+//Upgrades How do I update my components with a new image?
+//
+
+//The UI is inaccessible. What should I check?
+//What should I check for?
+//Database migrations - where?
+//Are containers running web UI services - details?
+
+// I cannot log in to the UI
+
+//How do I check or reset the admin password?
+//How do I check any LDAP configured logins?
+
+.The installation takes a long time or has errors. What should I check?
+
+. Ensure your system meets the minimum requirements as outlined in the installation guide. Items such as improper storage choices and high latency when distributing across many hosts all have a significant impact.
+
+. Check the installation log file, located by default at `./aap_install.log` unless otherwise changed within the local installer `ansible.cfg`.
+
+. Enable task profiling callbacks on an ad hoc basis to give an overview of where the installation program spends the most time. To do this, use the local `ansible.cfg` file.
Add a callback line such as this under the `[defaults]` section:
+
+----
+$ cat ansible.cfg
+[defaults]
+callbacks_enabled = ansible.posix.profile_tasks
+----
+
+.{ControllerNameStart} returns a 413 error
+
+This error occurs when a `manifest.zip` license file is larger than the `nginx_client_max_body_size` setting. If this error occurs, change the installation inventory file to include the following variables:
+
+----
+nginx_disable_hsts=false
+nginx_http_port=8081
+nginx_https_port=8444
+nginx_client_max_body_size=20m
+nginx_user_headers=[]
+----
+
+The current default setting of `20m` should be enough to avoid this issue.
+
+.The installation failed with a "502 Bad Gateway" when going to the controller UI.
+
+This error can manifest in the installation application output as:
+
+----
+TASK [ansible.containerized_installer.automationcontroller : Wait for the Controller API to be ready] ******************************************************
+fatal: [daap1.lan]: FAILED! => {"changed": false, "connection": "close", "content_length": "150", "content_type": "text/html", "date": "Fri, 29 Sep 2023 09:42:32 GMT", "elapsed": 0, "msg": "Status code was 502 and not [200]: HTTP Error 502: Bad Gateway", "redirected": false, "server": "nginx", "status": 502, "url": "https://daap1.lan:443/api/v2/ping/"}
+----
+
+* Check that the `automation-controller-web` container is running and that its systemd service is active.
+
+[NOTE]
+====
+These commands run at the regular unprivileged user level, not system-wide. If you have used `su` to switch to the user running the containers, you must set your `XDG_RUNTIME_DIR` environment variable to the correct value to be able to interact with the user `systemctl` units. Run the command `export XDG_RUNTIME_DIR="/run/user/$UID"`.
+====
+
+----
+podman ps | grep web
+----
+
+----
+systemctl --user | grep web
+----
+
+If these commands return no output, there is a problem.
+
+
+. Try restarting the `automation-controller-web` service:
++
+----
+systemctl start automation-controller-web.service --user
+----
++
+----
+systemctl --user | grep web
+----
++
+----
+systemctl status automation-controller-web.service --user
+----
++
+----
+Sep 29 10:55:16 daap1.lan automation-controller-web[29875]: nginx: [emerg] bind() to 0.0.0.0:443 failed (98: Address already in use)
+Sep 29 10:55:16 daap1.lan automation-controller-web[29875]: nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
+----
++
+The output indicates that the port is already, or still, in use by another service, in this case `nginx`.
+
+. Run:
++
+----
+sudo pkill nginx
+----
++
+. Restart the web service and check its status again.
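+
+To confirm what is listening on the web ports, you can check with `ss` (a sketch; adjust the ports if you have customized them):
+
+----
+$ sudo ss -tlnp | grep -E ':(80|443)\s'
+----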
+ +Normal service output should look similar to the following, and should still be running: + +---- +Sep 29 10:59:26 daap1.lan automation-controller-web[30274]: WSGI app 0 (mountpoint='/') ready in 3 seconds on interpreter 0x1a458c10 pid: 17 (default app) +Sep 29 10:59:26 daap1.lan automation-controller-web[30274]: WSGI app 0 (mountpoint='/') ready in 3 seconds on interpreter 0x1a458c10 pid: 20 (default app) +Sep 29 10:59:27 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:27,043 INFO [-] daphne.cli Starting server at tcp:port=8051:interface=127.0.> +Sep 29 10:59:27 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:27,043 INFO Starting server at tcp:port=8051:interface=127.0.0.1 +Sep 29 10:59:27 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:27,048 INFO [-] daphne.server HTTP/2 support not enabled (install the http2 > +Sep 29 10:59:27 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:27,048 INFO HTTP/2 support not enabled (install the http2 and tls Twisted ex> +Sep 29 10:59:27 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:27,049 INFO [-] daphne.server Configuring endpoint tcp:port=8051:interface=1> +Sep 29 10:59:27 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:27,049 INFO Configuring endpoint tcp:port=8051:interface=127.0.0.1 +Sep 29 10:59:27 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:27,051 INFO [-] daphne.server Listening on TCP address 127.0.0.1:8051 +Sep 29 10:59:27 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:27,051 INFO Listening on TCP address 127.0.0.1:8051 +Sep 29 10:59:54 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:54,139 INFO success: nginx entered RUNNING state, process has stayed up for > th> +Sep 29 10:59:54 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:54,139 INFO success: nginx entered RUNNING state, process has stayed up for > th> +Sep 29 10:59:54 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:54,139 INFO success: uwsgi entered RUNNING state, process has stayed up for > th> +Sep 29 10:59:54 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:54,139 INFO success: uwsgi entered RUNNING state, process has stayed up for > th> +Sep 29 10:59:54 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:54,139 INFO success: daphne entered RUNNING state, process has stayed up for > t> +Sep 29 10:59:54 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:54,139 INFO success: daphne entered RUNNING state, process has stayed up for > t> +Sep 29 10:59:54 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:54,139 INFO success: ws-heartbeat entered RUNNING state, process has stayed up f> +Sep 29 10:59:54 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:54,139 INFO success: ws-heartbeat entered RUNNING state, process has stayed up f> +Sep 29 10:59:54 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:54,139 INFO success: cache-clear entered RUNNING state, process has stayed up fo> +Sep 29 10:59:54 daap1.lan automation-controller-web[30274]: 2023-09-29 09:59:54,139 INFO success: cache-clear entered RUNNING state, process has stayed up +---- + +You can run the installation program again to ensure everything installs as expected. 
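+
+For reference, re-running the containerized installer from the installation directory typically looks like the following (a sketch; substitute the inventory file that you used for the original installation):
+
+----
+$ ansible-playbook -i inventory ansible.containerized_installer.install
+----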
+
+.When attempting to install containerized {PlatformNameShort} in {AWS}, you receive output that there is no space left on device
+
+----
+TASK [ansible.containerized_installer.automationcontroller : Create the receptor container] ***************************************************
+fatal: [ec2-13-48-25-168.eu-north-1.compute.amazonaws.com]: FAILED! => {"changed": false, "msg": "Can't create container receptor", "stderr": "Error: creating container storage: creating an ID-mapped copy of layer \"98955f43cc908bd50ff43585fec2c7dd9445eaf05eecd1e3144f93ffc00ed4ba\": error during chown: storage-chown-by-maps: lchown usr/local/lib/python3.9/site-packages/azure/mgmt/network/v2019_11_01/operations/__pycache__/_available_service_aliases_operations.cpython-39.pyc: no space left on device: exit status 1\n", "stderr_lines": ["Error: creating container storage: creating an ID-mapped copy of layer \"98955f43cc908bd50ff43585fec2c7dd9445eaf05eecd1e3144f93ffc00ed4ba\": error during chown: storage-chown-by-maps: lchown usr/local/lib/python3.9/site-packages/azure/mgmt/network/v2019_11_01/operations/__pycache__/_available_service_aliases_operations.cpython-39.pyc: no space left on device: exit status 1"], "stdout": "", "stdout_lines": []}
+----
+
+If you are installing into a default {AWS} marketplace RHEL instance, the `/home` filesystem might be too small, because `/home` is part of the root `/` filesystem. Make more space available. The documentation specifies a minimum of 40 GB for a single-node deployment of containerized {PlatformNameShort}.
+
+."Install container tools" task fails due to unavailable packages
+
+This error appears in the installation application output as:
+
+----
+TASK [ansible.containerized_installer.common : Install container tools] **********************************************************************************************************
+fatal: [192.0.2.1]: FAILED! => {"changed": false, "failures": ["No package crun available.", "No package podman available.", "No package slirp4netns available.", "No package fuse-overlayfs available."], "msg": "Failed to install some of the specified packages", "rc": 1, "results": []}
+fatal: [192.0.2.2]: FAILED! => {"changed": false, "failures": ["No package crun available.", "No package podman available.", "No package slirp4netns available.", "No package fuse-overlayfs available."], "msg": "Failed to install some of the specified packages", "rc": 1, "results": []}
+fatal: [192.0.2.3]: FAILED! => {"changed": false, "failures": ["No package crun available.", "No package podman available.", "No package slirp4netns available.", "No package fuse-overlayfs available."], "msg": "Failed to install some of the specified packages", "rc": 1, "results": []}
+fatal: [192.0.2.4]: FAILED! => {"changed": false, "failures": ["No package crun available.", "No package podman available.", "No package slirp4netns available.", "No package fuse-overlayfs available."], "msg": "Failed to install some of the specified packages", "rc": 1, "results": []}
+fatal: [192.0.2.5]: FAILED! => {"changed": false, "failures": ["No package crun available.", "No package podman available.", "No package slirp4netns available.", "No package fuse-overlayfs available."], "msg": "Failed to install some of the specified packages", "rc": 1, "results": []}
+----
+
+To fix this error, run the following command on the target hosts:
+
+----
+sudo subscription-manager register
+----
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-containerized-troubleshoot-ref.adoc b/downstream/modules/platform/ref-containerized-troubleshoot-ref.adoc
new file mode 100644
index 0000000000..1720d64dc0
--- /dev/null
+++ b/downstream/modules/platform/ref-containerized-troubleshoot-ref.adoc
@@ -0,0 +1,330 @@
+[id="ref-containerized-troubleshoot-ref"]
+
+= Containerized {PlatformNameShort} reference
+
+.Can you give details of the architecture for the {PlatformNameShort} containerized design?
+
+We use as much of the underlying native {RHEL} technology as possible. Podman is used for the container runtime and management of services.
+
+Use `podman ps` to list the running containers on the system:
+
+----
+$ podman ps
+
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+88ed40495117 registry.redhat.io/rhel8/postgresql-13:latest run-postgresql 48 minutes ago Up 47 minutes postgresql
+8f55ba612f04 registry.redhat.io/rhel8/redis-6:latest run-redis 47 minutes ago Up 47 minutes redis
+56c40445c590 registry.redhat.io/ansible-automation-platform-24/ee-supported-rhel8:latest /usr/bin/receptor... 47 minutes ago Up 47 minutes receptor
+f346f05d56ee registry.redhat.io/ansible-automation-platform-24/controller-rhel8:latest /usr/bin/launch_a... 47 minutes ago Up 45 minutes automation-controller-rsyslog
+26e3221963e3 registry.redhat.io/ansible-automation-platform-24/controller-rhel8:latest /usr/bin/launch_a... 46 minutes ago Up 45 minutes automation-controller-task
+c7ac92a1e8a1 registry.redhat.io/ansible-automation-platform-24/controller-rhel8:latest /usr/bin/launch_a... 46 minutes ago Up 28 minutes automation-controller-web
+----
+
+Use `podman images` to display information about locally stored images:
+
+----
+$ podman images
+
+REPOSITORY TAG IMAGE ID CREATED SIZE
+registry.redhat.io/ansible-automation-platform-24/ee-supported-rhel8 latest b497bdbee59e 10 days ago 3.16 GB
+registry.redhat.io/ansible-automation-platform-24/controller-rhel8 latest ed8ebb1c1baa 10 days ago 1.48 GB
+registry.redhat.io/rhel8/redis-6 latest 78905519bb05 2 weeks ago 357 MB
+registry.redhat.io/rhel8/postgresql-13 latest 9b65bc3d0413 2 weeks ago 765 MB
+----
+
+//Describe AAP Controller containers:
+
+//Describe AAP Hub containers:
+
+//Describe EDA Controller containers:
+
+Containerized {PlatformNameShort} runs as rootless containers for enhanced security by default. This means you can install containerized {PlatformNameShort} by using any local unprivileged user account. Privilege escalation is needed only for certain root-level tasks, and by default you do not need to use the root account directly.
+
+The installation program adds the following files to the filesystem of the underlying {RHEL} host where you run it:
+
+----
+$ tree -L 1
+.
+├── aap_install.log
+├── ansible.cfg
+├── collections
+├── galaxy.yml
+├── inventory
+├── LICENSE
+├── meta
+├── playbooks
+├── plugins
+├── README.md
+├── requirements.yml
+├── roles
+----
+
+The installation root directory also includes files for other containerized services that make use of Podman volumes.
+
+Here are some examples for further reference:
+
+The `containers` directory includes some of the Podman specifics used and installed for the execution plane:
+
+----
+containers/
+├── podman
+├── storage
+│   ├── defaultNetworkBackend
+│   ├── libpod
+│   ├── networks
+│   ├── overlay
+│   ├── overlay-containers
+│   ├── overlay-images
+│   ├── overlay-layers
+│   ├── storage.lock
+│   └── userns.lock
+└── storage.conf
+----
+
+The `controller` directory has some of the installed configuration and runtime data points:
+
+----
+controller/
+├── data
+│   ├── job_execution
+│   ├── projects
+│   └── rsyslog
+├── etc
+│   ├── conf.d
+│   ├── launch_awx_task.sh
+│   ├── settings.py
+│   ├── tower.cert
+│   └── tower.key
+├── nginx
+│   └── etc
+├── rsyslog
+│   └── run
+└── supervisor
+    └── run
+----
+
+The `receptor` directory has the {AutomationMesh} configuration:
+
+----
+receptor/
+├── etc
+│   └── receptor.conf
+└── run
+    ├── receptor.sock
+    └── receptor.sock.lock
+----
+
+After installation, you can also find other pieces in the local user's home directory, such as the `.cache` directory:
+
+----
+.cache/
+├── containers
+│   └── short-name-aliases.conf.lock
+└── rhsm
+    └── rhsm.log
+----
+
+Because services run under rootless Podman by default, supporting services such as `systemd` also operate at the non-privileged user level. Under `systemd`, you can see some of the available component service controls in the `.config` directory:
+
+----
+.config/
+├── cni
+│   └── net.d
+│       └── cni.lock
+├── containers
+│   ├── auth.json
+│   └── containers.conf
+└── systemd
+    └── user
+        ├── automation-controller-rsyslog.service
+        ├── automation-controller-task.service
+        ├── automation-controller-web.service
+        ├── default.target.wants
+        ├── podman.service.d
+        ├── postgresql.service
+        ├── receptor.service
+        ├── redis.service
+        └── sockets.target.wants
+----
+
+This is specific to Podman and conforms to the Open Container Initiative (OCI) specifications. When you run Podman as the root user, `/var/lib/containers` is used by default; for standard users, the hierarchy under `$HOME/.local` is used.
+
+The `.local` directory:
+
+----
+.local/
+└── share
+    └── containers
+        ├── cache
+        ├── podman
+        └── storage
+----
+
+For example, `.local/share/containers/storage/volumes` contains what the output from `podman volume ls` provides:
+
+----
+$ podman volume ls
+
+DRIVER VOLUME NAME
+local d73d3fe63a957bee04b4853fd38c39bf37c321d14fdab9ee3c9df03645135788
+local postgresql
+local redis_data
+local redis_etc
+local redis_run
+----
+
+The execution plane is isolated from the main control plane services to ensure that it does not affect them.
+
+*Control plane services*
+
+Control plane services run with the standard Podman configuration and can be found in `~/.local/share/containers/storage`.
+
+*Execution plane services*
+
+Execution plane services ({ControllerName}, {EDAName}, and execution nodes) use a dedicated configuration found in `~/aap/containers/storage.conf`. This separation prevents execution plane containers from affecting the control plane services.
+
+You can view the execution plane configuration with one of the following commands:
+
+----
+CONTAINERS_STORAGE_CONF=~/aap/containers/storage.conf podman 
+----
+
+----
+CONTAINER_HOST=unix://run/user//podman/podman.sock podman 
+----
+
+
+.How can I see host resource utilization statistics?
+
+* Run:
+
+----
+$ podman container stats -a
+----
+
+----
+ID NAME CPU % MEM USAGE / LIMIT MEM % NET IO BLOCK IO PIDS CPU TIME AVG CPU %
+0d5d8eb93c18 automation-controller-web 0.23% 959.1MB / 3.761GB 25.50% 0B / 0B 0B / 0B 16 20.885142s 1.19%
+3429d559836d automation-controller-rsyslog 0.07% 144.5MB / 3.761GB 3.84% 0B / 0B 0B / 0B 6 4.099565s 0.23%
+448d0bae0942 automation-controller-task 1.51% 633.1MB / 3.761GB 16.83% 0B / 0B 0B / 0B 33 34.285272s 1.93%
+7f140e65b57e receptor 0.01% 5.923MB / 3.761GB 0.16% 0B / 0B 0B / 0B 7 1.010613s 0.06%
+c1458367ca9c redis 0.48% 10.52MB / 3.761GB 0.28% 0B / 0B 0B / 0B 5 9.074042s 0.47%
+ef712cc2dc89 postgresql 0.09% 21.88MB / 3.761GB 0.58% 0B / 0B 0B / 0B 21 15.571059s 0.80%
+----
+
+The previous output is an example from an installation of a Dell sold and offered containerized {PlatformNameShort} solution (DAAP), which utilizes approximately 1.8 GB of RAM.
+
+
+.How much storage is used and where?
+
+The container volume storage is under the local user at `$HOME/.local/share/containers/storage/volumes`.
+
+. To view the details of each volume, run:
++
+----
+$ podman volume ls
+----
++
+. Then run:
++
+----
+$ podman volume inspect 
+----
+
+Here is an example:
+
+----
+$ podman volume inspect postgresql
+[
+    {
+        "Name": "postgresql",
+        "Driver": "local",
+        "Mountpoint": "/home/aap/.local/share/containers/storage/volumes/postgresql/_data",
+        "CreatedAt": "2024-01-08T23:39:24.983964686Z",
+        "Labels": {},
+        "Scope": "local",
+        "Options": {},
+        "MountCount": 0,
+        "NeedsCopyUp": true
+    }
+]
+----
+
+
+Several files created by the installation program are located in `$HOME/aap/` and bind-mounted into various running containers.
+
+
+. To view the mounts associated with a container, run:
++
+----
+$ podman ps --format "{{.ID}}\t{{.Command}}\t{{.Names}}"
+----
++
+----
+89e779b81b83 run-postgresql postgresql
+4c33cc77ef7d run-redis redis
+3d8a028d892d /usr/bin/receptor... receptor
+09821701645c /usr/bin/launch_a... automation-controller-rsyslog
+a2ddb5cac71b /usr/bin/launch_a... automation-controller-task
+fa0029a3b003 /usr/bin/launch_a... automation-controller-web
+20f192534691 gunicorn --bind 1... automation-eda-api
+f49804c7e6cb daphne -b 127.0.0... automation-eda-daphne
+d340b9c1cb74 /bin/sh -c nginx ... automation-eda-web
+111f47de5205 aap-eda-manage rq... automation-eda-worker-1
+171fcb1785af aap-eda-manage rq... automation-eda-worker-2
+049d10555b51 aap-eda-manage rq... automation-eda-activation-worker-1
+7a78a41a8425 aap-eda-manage rq... automation-eda-activation-worker-2
+da9afa8ef5e2 aap-eda-manage sc... automation-eda-scheduler
+8a2958be9baf gunicorn --name p... automation-hub-api
+0a8b57581749 gunicorn --name p... automation-hub-content
+68005b987498 nginx -g daemon o... automation-hub-web
+cb07af77f89f pulpcore-worker automation-hub-worker-1
+a3ba05136446 pulpcore-worker automation-hub-worker-2
+----
++
+
+. 
Then run: ++ +---- +$ podman inspect | jq -r .[].Mounts[].Source +---- ++ +---- +/home/aap/.local/share/containers/storage/volumes/receptor_run/_data +/home/aap/.local/share/containers/storage/volumes/redis_run/_data +/home/aap/aap/controller/data/rsyslog +/home/aap/aap/controller/etc/tower.key +/home/aap/aap/controller/etc/conf.d/callback_receiver_workers.py +/home/aap/aap/controller/data/job_execution +/home/aap/aap/controller/nginx/etc/controller.conf +/home/aap/aap/controller/etc/conf.d/subscription_usage_model.py +/home/aap/aap/controller/etc/conf.d/cluster_host_id.py +/home/aap/aap/controller/etc/conf.d/insights.py +/home/aap/aap/controller/rsyslog/run +/home/aap/aap/controller/data/projects +/home/aap/aap/controller/etc/settings.py +/home/aap/aap/receptor/etc/receptor.conf +/home/aap/aap/controller/etc/conf.d/execution_environments.py +/home/aap/aap/tls/extracted +/home/aap/aap/controller/supervisor/run +/home/aap/aap/controller/etc/uwsgi.ini +/home/aap/aap/controller/etc/conf.d/container_groups.py +/home/aap/aap/controller/etc/launch_awx_task.sh +/home/aap/aap/controller/etc/tower.cert +---- + ++ + +. If the `jq` RPM is not installed, install with: ++ +---- +$ sudo dnf -y install jq +---- + +// Reference How do I check on the health of the service and that all components are running? +// +// I am experiencing errors and want to look for the application logs, where do I look? +// + diff --git a/downstream/modules/platform/ref-controller-analytics-reports.adoc b/downstream/modules/platform/ref-controller-analytics-reports.adoc index 54eda6976d..2890bc6eb2 100644 --- a/downstream/modules/platform/ref-controller-analytics-reports.adoc +++ b/downstream/modules/platform/ref-controller-analytics-reports.adoc @@ -1,25 +1,34 @@ [id="ref-controller-analytics-reports"] = Analytics Reports +//[ddacosta - removed to reflect current environment but this might be updated in the product later and this statement could be added back.] +//Reports from collection are accessible through the {ControllerName} UI if you have superuser-level permissions. +//By including the analytics view on-prem where it is most convenient, you can access data that can affect your day-to-day work. +//This data is aggregated from the automation provided on link:https://console.redhat.com[{Console}]. -Reports from collection are accessible through the {ControllerName} UI if you have superuser-level permissions. -By including the analytics view on-prem where it is most convenient, you can access data that can affect your day-to-day work. -This data is aggregated from the automation provided on link:https://console.redhat.com[{Console}]. +Reports for data collected are available through link:https://console.redhat.com[{Console}]. -Currently available is a view-only version of the Automation Calculator utility that shows a report that represents (possible) savings to the subscriber. +Other {Analytics} data currently available and accessible through the platform UI include the following: + +*Automation Calculator* is a view-only version of the Automation Calculator utility that shows a report that represents (possible) savings to the subscriber. image:aa-automation-calculator.png[Automation calculator] -[NOTE] -==== -This option is available for technical preview and is subject to change in a future release. -To preview the analytic reports view, set the *Enable Preview of New User Interface* toggle to *On* from the *Miscellaneous System Settings* option of the {MenuAEAdminSettings} menu. 
+*Host Metrics* is an analytics report collected for host data, such as when hosts were first automated, when they were most recently automated, how many times they were automated, and how many times each host has been deleted.
+
+*Subscription Usage* reports the historical usage of your subscription. Subscription capacity and licenses consumed per month are displayed, with the ability to filter by the last year, two years, or three years.
+
+//I don't think this is included
+//[NOTE]
+//====
+//This option is available for technical preview and is subject to change in a future release.
+//To preview the analytic reports view, set the *Enable Preview of New User Interface* toggle to *On* from the *Miscellaneous System Settings* option of the {MenuAEAdminSettings} menu.
-After saving, logout and log back in to access the options under the *Analytics* section on the navigation panel.
+//After saving, logout and log back in to access the options under the *Analytics* section on the navigation panel.
-image:aa-options-navbar.png[Navigation panel]
-====
+//image:aa-options-navbar.png[Navigation panel]
+//====
-Host Metrics is another analytics report collected for host data.
-The ability to access this option from this part of the UI is currently in tech preview and is subject to change in a future release.
-For more information, see the _Host Metrics view_ in xref:controller-config[{ControllerNameStart} configuration].
diff --git a/downstream/modules/platform/ref-controller-api-field-lookups.adoc b/downstream/modules/platform/ref-controller-api-field-lookups.adoc
index 7b6f7d9f8e..be00f05328 100644
--- a/downstream/modules/platform/ref-controller-api-field-lookups.adoc
+++ b/downstream/modules/platform/ref-controller-api-field-lookups.adoc
@@ -11,7 +11,7 @@ You can use field lookups for more advanced queries, by appending the lookup to
 The following field lookups are supported:
-* exact: Exact match (default lookup if not specified).
+* exact: Exact match (default lookup if not specified, see the following note for more information).
 * iexact: Case-insensitive version of exact.
 * contains: Field contains value.
 * icontains: Case-insensitive version of contains.
@@ -37,3 +37,21 @@ You can specify lists (for the `in` lookup) as a comma-separated list of values.
 Filtering based on the requesting user's level of access by query string parameter:
 * `role_level`: Level of role to filter on, such as `admin_role`
+
+[NOTE]
+====
+Earlier releases of {PlatformNameShort} returned queries with *_exact* results by default.
+As a workaround, set the `limit` to `?limit_exact` for the default filter.
+For example, `/api/v2/jobs/?limit_exact=example.domain.com` results in:
+---- +==== + + diff --git a/downstream/modules/platform/ref-controller-app-token-functions.adoc b/downstream/modules/platform/ref-controller-app-token-functions.adoc index 2ed67f6ca3..7fabc1cb6b 100644 --- a/downstream/modules/platform/ref-controller-app-token-functions.adoc +++ b/downstream/modules/platform/ref-controller-app-token-functions.adoc @@ -1,5 +1,5 @@ [id="ref-controller-app-token-functions"] -= Application Token Functions += Application token functions -The `refresh` and `revoke` functions associated with tokens, for tokens at the `/api/o/` endpoints can currently only be carried out with application tokens. \ No newline at end of file +The `refresh` and `revoke` functions associated with tokens, for tokens at the `/o/` endpoints can currently only be carried out with application tokens. \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-applications-getting-started.adoc b/downstream/modules/platform/ref-controller-applications-getting-started.adoc index 513519c9b0..32ca2c993e 100644 --- a/downstream/modules/platform/ref-controller-applications-getting-started.adoc +++ b/downstream/modules/platform/ref-controller-applications-getting-started.adoc @@ -2,11 +2,10 @@ = Getting started with OAuth Applications -From the navigation panel, select {MenuAMAdminOauthApps}. -The *OAuth Applications* page displays a searchable list of all available applications currently managed by {PlatformNameShort} and can that you can sort by *Name*. +You can access the *OAuth Applications* page from the navigation panel by selecting {MenuAMAdminOauthApps}. From there you can view, create, sort and search for applications currently managed by {PlatformNameShort} and {ControllerName}. //image:apps-list-view-examples.png[Applications- with example apps] -If no applications exist, you are requested to create applications. +If no applications exist, you can create one by clicking btn:[Create OAuth application]. //image:apps-list-view-empty.png[Add applications] diff --git a/downstream/modules/platform/ref-controller-approval-nodes.adoc b/downstream/modules/platform/ref-controller-approval-nodes.adoc index 1de6f64a6a..d5a64bfa33 100644 --- a/downstream/modules/platform/ref-controller-approval-nodes.adoc +++ b/downstream/modules/platform/ref-controller-approval-nodes.adoc @@ -2,7 +2,7 @@ = Approval nodes -Choosing an *Approval* node requires your intervention in order to advance the workflow. +Choosing an *Approval* node requires your intervention to advance a workflow. This functions as a means to pause the workflow in between playbooks so that you can give approval to continue on to the next playbook in the workflow. This gives the user a specified amount of time to intervene, but also enables you to continue as quickly as possible without having to wait on another trigger. @@ -17,7 +17,7 @@ The approver is anyone who meets the following criteria: * A user who has organization administrator or above privileges (for the organization associated with that workflow job template). * A user who has the *Approve* permission explicitly assigned to them within that specific workflow job template. -image::ug-wf-node-approval-notifications.png[Node approval notifications] +//image::ug-wf-node-approval-notifications.png[Node approval notifications] If pending approval nodes are not approved within the specified time limit (if an expiration was assigned) or they are denied, then they are marked as "timed out" or "failed", and move on to the next "on fail node" or "always node". 
If approved, the "on success" path is taken. diff --git a/downstream/modules/platform/ref-controller-audit-functionality.adoc b/downstream/modules/platform/ref-controller-audit-functionality.adoc index e15a680232..58c37ec208 100644 --- a/downstream/modules/platform/ref-controller-audit-functionality.adoc +++ b/downstream/modules/platform/ref-controller-audit-functionality.adoc @@ -3,9 +3,9 @@ = Audit and logging functionality For any administrative access, it is important to audit and watch for actions. -For the system overall, you can do this through the built in audit support and the built-in logging support. +For the system overall, you can do this through the built-in audit support and the built-in logging support. -For {ControllerName}, you can do this through the built-in Activity Stream support that logs all changes within {ControllerName}, as well as through the automation logs. +For {ControllerName}, you can do this through the built-in Activity Stream support that logs all changes within {ControllerName}, and through the automation logs. Best practices dictate collecting logging and auditing centrally rather than reviewing it on the local system. You must configure {ControllerName} to use standard IDs or logging and auditing (Splunk) in your environment. diff --git a/downstream/modules/platform/ref-controller-automation-analytics.adoc b/downstream/modules/platform/ref-controller-automation-analytics.adoc index c70c051c72..d8bf14a429 100644 --- a/downstream/modules/platform/ref-controller-automation-analytics.adoc +++ b/downstream/modules/platform/ref-controller-automation-analytics.adoc @@ -2,7 +2,7 @@ = {Analytics} -When you imported your license for the first time, you were given options related to the collection of data that powers {Analytics}, a cloud service that is part of the {PlatformNameShort} subscription. +When you imported your license for the first time, you were automatically opted in for the collection of data that powers {Analytics}, a cloud service that is part of the {PlatformNameShort} subscription. [IMPORTANT] ==== @@ -12,15 +12,7 @@ For opt-in of {Analytics} to have any effect, your instance of {ControllerName} As with Red Hat Insights, {Analytics} is built to collect the minimum amount of data needed. No credential secrets, personal data, automation variables, or task output is gathered. -For more information, see xref:ref-controller-data-collection-details[Details of data collection]. - -To enable this feature, turn on data collection for {Analytics} and enter your Red Hat customer credentials in the *Miscellaneous System settings* of the System configuration list of options in the {MenuAEAdminSettings} menu. - -image:configure-controller-system-misc-analytics.png[Miscellaneous System Settings] - -You can view the location to which the collection of insights data is uploaded in the *{Analytics} upload URL* field on the *Details* page. - -image:misc-system-details-analytics-url.png[Insights location] +When you imported your license for the first time, you were automatically opted in to {Analytics}. To configure or disable this feature, see xref:proc-controller-configure-analytics[Configuring {Analytics}]. By default, the data is collected every four hours. When you enable this feature, data is collected up to a month in arrears (or until the previous collection). 
@@ -34,7 +26,7 @@ This setting can also be enabled through the API by specifying `INSIGHTS_TRACKIN The {Analytics} generated from this data collection can be found on the link:https://cloud.redhat.com[Red Hat Cloud Services] portal. -image:aa-dashboard.png[Analytics dashboard] +//image:aa-dashboard.png[Analytics dashboard] *Clusters* data is the default view. This graph represents the number of job runs across all {ControllerName} clusters over a period of time. @@ -53,3 +45,8 @@ On the clouds navigation panel, select menu:Organization Statistics[] to view in * xref:ref-controller-use-by-organization[Use by organization] * xref:ref-controller-jobs-run-by-organization[Job runs by organization] * xref:ref-controller-organization-status[Organization status] + +[NOTE] +==== +The organization statistics page will be deprecated in a future release. +==== diff --git a/downstream/modules/platform/ref-controller-aws-secrets-lookup.adoc b/downstream/modules/platform/ref-controller-aws-secrets-lookup.adoc new file mode 100644 index 0000000000..b88aaa5886 --- /dev/null +++ b/downstream/modules/platform/ref-controller-aws-secrets-lookup.adoc @@ -0,0 +1,5 @@ +[id="ref-controller-aws-secrets-lookup"] + += AWS secrets manager lookup + +This is considered part of the secret management capability. For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/configuring_automation_execution/assembly-controller-secret-management#ref-aws-secrets-manager-lookup[AWS Secrets Manager Lookup] \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-capacity-planning-exercise.adoc b/downstream/modules/platform/ref-controller-capacity-planning-exercise.adoc index 7d1b5d5936..1d2f43a0ca 100644 --- a/downstream/modules/platform/ref-controller-capacity-planning-exercise.adoc +++ b/downstream/modules/platform/ref-controller-capacity-planning-exercise.adoc @@ -20,7 +20,7 @@ include::ref-controller-example-workload-reqs.adoc[leveloffset=+1] |=== | Node | API capacity | Default execution capacity | Default control capacity | Mean event processing rate at 100% capacity usage | Mean events processing rate at 50% capacity usage | Mean event processing rate at 40% capacity usage -| 4 CPU at 2.5Ghz, 16 GB RAM control node, a maximum of 3000 IOPs disk | approximately 10 requests per second | n/a | 137 jobs | 1100 per second | 1400 per second | 1630 per second +| 4 CPU at 2.5Ghz, 16 GB RAM control node, a maximum of 3000 IOPs disk | about 10 requests per second | n/a | 137 jobs | 1100 per second | 1400 per second | 1630 per second | 4 CPU at 2.5Ghz, 16 GB RAM execution node, a maximum of 3000 IOPs disk | n/a | 137 | n/a | n/a | n/a | n/a | 4 CPU at 2.5Ghz, 16 GB RAM database node, a maximum of 3000 IOPs disk | n/a | n/a | n/a | n/a | n/a | n/a |=== @@ -29,13 +29,13 @@ Because controlling jobs competes with job event processing on the control node, For this example, for a workload on 300 managed hosts, executing 1000 tasks per hour per host, 10 concurrent jobs with forks set to 5 on playbooks, and an average event size 1 Mb, use the following procedure: -* Deploy 1 execution node, 1 control node, 1 database node of 4 CPU at 2.5Ghz, 16 GB RAM, and disks that have approximately 3000 IOPs. +* Deploy 1 execution node, 1 control node, 1 database node of 4 CPU at 2.5Ghz, 16 GB RAM, and disks that have about 3000 IOPs. * Keep the default fork setting of 5 on job templates. 
-* Use the capacity adjustment feature in the instance view of the UI on the control node to reduce the capacity down to 16, the lowest value, to reserve more of the control node's capacity for processing events. +* Use the capacity change feature in the instance view of the UI on the control node to reduce the capacity down to 16, the lowest value, to reserve more of the control node's capacity for processing events. .Additional Resources -* For more information on workloads with high levels of API interaction, see link:https://www.ansible.com/blog/scaling-automation-controller-for-api-driven-workloads[Scaling Automation Controller for API Driven Workloads]. -* For more information on managing capacity with instances, see xref:assembly-controller-instances[Managing Capacity With Instances]. -* For more information on operator-based deployments, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_performance_considerations_for_operator_based_installations/index[Red Hat Ansible Automation Platform Performance Considerations for Operator Based Installations]. +* For more information about workloads with high levels of API interaction, see link:https://www.ansible.com/blog/scaling-automation-controller-for-api-driven-workloads[Scaling Automation Controller for API Driven Workloads]. +* For more information about managing capacity with instances, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/using_automation_execution/index#assembly-controller-instances[Managing capacity with Instances]. +* For more information about operator-based deployments, see link:{URLOCPPerformanceGuide}/index[{PlatformName} considerations for operator environments]. diff --git a/downstream/modules/platform/ref-controller-capacity-planning.adoc b/downstream/modules/platform/ref-controller-capacity-planning.adoc index c61e89b661..cf6c9b32cd 100644 --- a/downstream/modules/platform/ref-controller-capacity-planning.adoc +++ b/downstream/modules/platform/ref-controller-capacity-planning.adoc @@ -1,6 +1,6 @@ [id="ref-controller-capacity-planning"] -= Capacity Planning for deploying {ControllerName} += Capacity planning for deploying {ControllerName} Capacity planning for {ControllerName} is planning the scale and characteristics of your deployment so that it has the capacity to run the planned workload. Capacity planning includes the following phases: diff --git a/downstream/modules/platform/ref-controller-change-admin-password.adoc b/downstream/modules/platform/ref-controller-change-admin-password.adoc index 166d67f74e..2677ea48e3 100644 --- a/downstream/modules/platform/ref-controller-change-admin-password.adoc +++ b/downstream/modules/platform/ref-controller-change-admin-password.adoc @@ -2,8 +2,8 @@ = Change the {ControllerName} Administrator Password -During the installation process, you are prompted to enter an administrator password which is used for the `admin` superuser or system administrator created by {ControllerName}. -If you log into the instance using SSH, it tells you the default administrator password in the prompt. +During the installation process, you are prompted to enter an administrator password that is used for the `admin` superuser or system administrator created by {ControllerName}. +If you log in to the instance by using SSH, it tells you the default administrator password in the prompt. 
If you need to change this password at any point, run the following command as root on the {ControllerName} server:
diff --git a/downstream/modules/platform/ref-controller-cleanup-expired-tokens.adoc b/downstream/modules/platform/ref-controller-cleanup-expired-tokens.adoc
index f837af0f59..f054dbda6b 100644
--- a/downstream/modules/platform/ref-controller-cleanup-expired-tokens.adoc
+++ b/downstream/modules/platform/ref-controller-cleanup-expired-tokens.adoc
@@ -9,7 +9,7 @@ management jobs.
 For more information, see xref:proc-controller-scheduling-deletion[Scheduling deletion].
-You can also set or review notifications associated with this management job the same way as described in xref:proc-controller-management-notifications[setting notifications] for activity
+You can also set or review notifications associated with this management job the same way as described in xref:proc-controller-management-notifications[Setting notifications] for activity
 stream management jobs.
-For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-notifications[Notifications] in the _{ControllerUG}_.
+For more information, see link:{URLControllerUserGuide}/controller-notifications[Notifications] in _{ControllerUG}_.
diff --git a/downstream/modules/platform/ref-controller-cleanup-old-data.adoc b/downstream/modules/platform/ref-controller-cleanup-old-data.adoc
index 210eaee027..1119c4aa4e 100644
--- a/downstream/modules/platform/ref-controller-cleanup-old-data.adoc
+++ b/downstream/modules/platform/ref-controller-cleanup-old-data.adoc
@@ -17,4 +17,4 @@ This permanently deletes the job details and job output for jobs older than a sp
 ----
 awx-manage cleanup_activitystream [--help]
 ----
-This permanently deletes any link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/assembly-controller-user-interface#proc-controller-activity-stream[Activity stream] data older than a specific number of days.
\ No newline at end of file
+This permanently deletes any Activity stream data older than a specific number of days.
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-controller-clear-sessions.adoc b/downstream/modules/platform/ref-controller-clear-sessions.adoc
index 795aff35c0..64a11163d0 100644
--- a/downstream/modules/platform/ref-controller-clear-sessions.adoc
+++ b/downstream/modules/platform/ref-controller-clear-sessions.adoc
@@ -6,4 +6,4 @@
 Use this command to delete all sessions that have expired.
 For more information, see link:https://docs.djangoproject.com/en/4.2/topics/http/sessions/#clearing-the-session-store[Clearing the session store] in Django's Oauth Toolkit documentation.
-For more information on OAuth2 token management in the UI, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/assembly-controller-applications[Applications] section of the {ControllerUG}.
+For more information about OAuth2 token management in the UI, see xref:assembly-controller-applications[Applications].
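+
+For reference, a typical invocation looks like the following (run on the {ControllerName} node):
+
+----
+$ awx-manage clearsessions
+----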
diff --git a/downstream/modules/platform/ref-controller-cluster-install.adoc b/downstream/modules/platform/ref-controller-cluster-install.adoc index 0e1fea3565..db5dbdaf36 100644 --- a/downstream/modules/platform/ref-controller-cluster-install.adoc +++ b/downstream/modules/platform/ref-controller-cluster-install.adoc @@ -55,7 +55,7 @@ hostC routable_hostname=10.1.0.4 routable_hostname ---- -For more information about `routable_hostname`, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/appendix-inventory-files-vars#ref-genera-inventory-variables[General variables] in the _{PlatformName} Installation Guide_. +For more information about `routable_hostname`, see link:{URLInstallationGuide}/appendix-inventory-files-vars#ref-genera-inventory-variables[General variables] in the _{TitleInstallationGuide}_. [IMPORTANT] ==== diff --git a/downstream/modules/platform/ref-controller-cluster-management.adoc b/downstream/modules/platform/ref-controller-cluster-management.adoc index 8c2684d71e..d3549bd9a9 100644 --- a/downstream/modules/platform/ref-controller-cluster-management.adoc +++ b/downstream/modules/platform/ref-controller-cluster-management.adoc @@ -2,7 +2,7 @@ = Cluster management -For more information on the `awx-manage provision_instance` and `awx-manage deprovision_instance` commands, see xref:controller-clustering[Clustering]. +For more information about the `awx-manage provision_instance` and `awx-manage deprovision_instance` commands, see xref:controller-clustering[Clustering]. [NOTE] ==== diff --git a/downstream/modules/platform/ref-controller-config-json.adoc b/downstream/modules/platform/ref-controller-config-json.adoc index 7ee09a2461..3edb7ef2a3 100644 --- a/downstream/modules/platform/ref-controller-config-json.adoc +++ b/downstream/modules/platform/ref-controller-config-json.adoc @@ -47,7 +47,7 @@ Which includes the following fields: * *ansible_version*: The system Ansible version on the host * *authentication_backends*: The user authentication backends that are available. -For more information, see xref:assembly-controller-set-up-social-authentication[Setting up social authentication] or xref:controller-LDAP-authentication[Setting up LDAP authentication]. +For more information, see link:{URLCentralAuth}/index#gw-config-authentication-type[Configuring an authentication type]. * *external_logger_enabled*: Whether external logging is enabled * *external_logger_type*: What logging backend is in use if enabled. For more information, see xref:assembly-controller-logging-aggregation[Logging and aggregation]. 
diff --git a/downstream/modules/platform/ref-controller-connect-to-host.adoc b/downstream/modules/platform/ref-controller-connect-to-host.adoc index d490400401..1609336719 100644 --- a/downstream/modules/platform/ref-controller-connect-to-host.adoc +++ b/downstream/modules/platform/ref-controller-connect-to-host.adoc @@ -2,9 +2,9 @@ = Unable to connect to your host -If you are unable to run the `helloworld.yml` example playbook from the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/getting_started_with_automation_controller/index#controller-projects[Managing projects] section of the _{ControllerGS}_ guide or other playbooks due to host connection errors, try the following: +//If you are unable to run the `helloworld.yml` example playbook from the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/getting_started_with_automation_controller/index#controller-projects[Managing projects] section of the _{ControllerGS}_ guide or other playbooks due to host connection errors, try the following: -* Can you `ssh` to your host? -Ansible depends on SSH access to the servers you are managing. -* Are your `hostnames` and IPs correctly added in your inventory file? -Check for typos. +//* Can you `ssh` to your host? +//Ansible depends on SSH access to the servers you are managing. +//* Are your `hostnames` and IPs correctly added in your inventory file? +//Check for typos. diff --git a/downstream/modules/platform/ref-controller-content-sourcing.adoc b/downstream/modules/platform/ref-controller-content-sourcing.adoc index b6601e58c2..c9b2a83bad 100644 --- a/downstream/modules/platform/ref-controller-content-sourcing.adoc +++ b/downstream/modules/platform/ref-controller-content-sourcing.adoc @@ -23,11 +23,11 @@ Additionally, post-upgrade, these settings are not visible (or editable) from th {ControllerNameStart} continues to fetch roles directly from public Galaxy even if `galaxy.ansible.com` is not the first credential in the list for the organization. The global Galaxy settings are no longer configured at the jobs level, but at the organization level in the user interface. -The organization's *Add* and *Edit* windows have an optional *Credential* lookup field for credentials of `kind=galaxy`. +The organization's *Create organization* and *Edit organization* windows have an optional *Galaxy credentials* lookup field for credentials of `kind=galaxy`. image:organizations-galaxy-credentials.png[Create organization] It is important to specify the order of these credentials as order sets precedence for the sync and lookup of the content. -For more information, see xref:proc-controller-create-organization[Creating an organization]. +For more information, see link:{URLCentralAuth}/gw-managing-access#proc-controller-create-organization[Creating an organization]. For more information about how to set up a project by using collections, see xref:proc-projects-using-collections-with-hub[Using Collections with {HubName}]. 
diff --git a/downstream/modules/platform/ref-controller-create-oauth2-token.adoc b/downstream/modules/platform/ref-controller-create-oauth2-token.adoc index 50456f9502..2fc749366d 100644 --- a/downstream/modules/platform/ref-controller-create-oauth2-token.adoc +++ b/downstream/modules/platform/ref-controller-create-oauth2-token.adoc @@ -6,7 +6,7 @@ Use the following command to create OAuth2 tokens (specify the username for `exa [literal, options="nowrap" subs="+attributes"] ---- -$ awx-manage create_oauth2_token --user example_user +$ aap-gateway-manage create_oauth2_token --user example_user New OAuth2 token for example_user: j89ia8OO79te6IAZ97L7E8bMgXCON2 ---- diff --git a/downstream/modules/platform/ref-controller-credential-azure-key.adoc b/downstream/modules/platform/ref-controller-credential-azure-key.adoc index 85d1cda63e..00da4cf070 100644 --- a/downstream/modules/platform/ref-controller-credential-azure-key.adoc +++ b/downstream/modules/platform/ref-controller-credential-azure-key.adoc @@ -4,4 +4,4 @@ This is considered part of the secret management capability. -For more information, see xref:ref-azure-key-vault-lookup[{Azure} Key Vault]. \ No newline at end of file +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/configuring_automation_execution/assembly-controller-secret-management#ref-azure-key-vault-lookup[{Azure} Key Vault]. \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-credential-bitbucket.adoc b/downstream/modules/platform/ref-controller-credential-bitbucket.adoc new file mode 100644 index 0000000000..1e27842b20 --- /dev/null +++ b/downstream/modules/platform/ref-controller-credential-bitbucket.adoc @@ -0,0 +1,8 @@ +[id="ref-controller-credential-bitbucket"] + += Bitbucket Data Center HTTP access token + +Bitbucket Data Center is a self-hosted Git repository solution for collaboration and code management. +Select this credential type to enable you to use HTTP access tokens in place of passwords for Git over HTTPS. + +For more information, see link:https://confluence.atlassian.com/bitbucketserver/http-access-tokens-939515499.html[HTTP access tokens] in the Bitbucket Data Center documentation. \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-credential-centrify-vault.adoc b/downstream/modules/platform/ref-controller-credential-centrify-vault.adoc index 7a3541bb15..e06d268aaa 100644 --- a/downstream/modules/platform/ref-controller-credential-centrify-vault.adoc +++ b/downstream/modules/platform/ref-controller-credential-centrify-vault.adoc @@ -3,4 +3,4 @@ = Centrify Vault Credential Provider Lookup credential type This is considered part of the secret management capability. -For more information, see xref:ref-centrify-vault-lookup[Centrify Vault Credential Provider Lookup]. \ No newline at end of file +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/configuring_automation_execution/assembly-controller-secret-management#ref-centrify-vault-lookup[Centrify Vault Credential Provider Lookup].
\ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-credential-cyberark-central.adoc b/downstream/modules/platform/ref-controller-credential-cyberark-central.adoc index 85b0caf43c..5ef3290cf8 100644 --- a/downstream/modules/platform/ref-controller-credential-cyberark-central.adoc +++ b/downstream/modules/platform/ref-controller-credential-cyberark-central.adoc @@ -4,4 +4,4 @@ This is considered part of the secret management capability. -For more information, see xref:ref-cyberark-ccp-lookup[CyberArk Central Credential Provider (CCP) Lookup]. \ No newline at end of file +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/configuring_automation_execution/assembly-controller-secret-management#ref-cyberark-ccp-lookup[CyberArk Central Credential Provider (CCP) Lookup]. \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-credential-cyberark-conjur.adoc b/downstream/modules/platform/ref-controller-credential-cyberark-conjur.adoc index 745c43536a..e7b756bba4 100644 --- a/downstream/modules/platform/ref-controller-credential-cyberark-conjur.adoc +++ b/downstream/modules/platform/ref-controller-credential-cyberark-conjur.adoc @@ -4,4 +4,4 @@ This is considered part of the secret management capability. -For more information, see xref:ref-cyberark-conjur-lookup[CyberArk Conjur Secrets Manager Lookup]. \ No newline at end of file +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/configuring_automation_execution/assembly-controller-secret-management#ref-cyberark-conjur-lookup[CyberArk Conjur Secrets Manager Lookup]. \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-credential-gitHub-pat.adoc b/downstream/modules/platform/ref-controller-credential-gitHub-pat.adoc index c9fc350f92..ac7fed7bb3 100644 --- a/downstream/modules/platform/ref-controller-credential-gitHub-pat.adoc +++ b/downstream/modules/platform/ref-controller-credential-gitHub-pat.adoc @@ -4,7 +4,7 @@ Select this credential to enable you to access GitHub by using a _Personal Access Token_ (PAT), which you can get through GitHub. -For more information, see xref:controller-set-up-github-webhook[Working with Webhooks]. +For more information, see xref:controller-set-up-github-webhook[Setting up a GitHub webhook]. GitHub PAT credentials require a value in the *Token* field, which is provided in your GitHub profile settings. diff --git a/downstream/modules/platform/ref-controller-credential-gitLab-pat.adoc b/downstream/modules/platform/ref-controller-credential-gitLab-pat.adoc index 9205d610cc..6cc2614076 100644 --- a/downstream/modules/platform/ref-controller-credential-gitLab-pat.adoc +++ b/downstream/modules/platform/ref-controller-credential-gitLab-pat.adoc @@ -4,7 +4,7 @@ Select this credential to enable you to access GitLab by using a _Personal Access Token_ (PAT), which you can get through GitLab. -For more information, see xref:controller-set-up-github-webhook[Working with Webhooks]. +For more information, see xref:controller-set-up-gitlab-webhook[Setting up a GitLab webhook]. GitLab PAT credentials require a value in the *Token* field, which is provided in your GitLab profile settings. 
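+
+Before you save a PAT in a credential, it can help to confirm that the token works. A minimal sketch, assuming a self-managed GitLab host (the hostname and token are placeholders):
+
+[literal, options="nowrap" subs="+attributes"]
+----
+# A valid token returns your GitLab user record as JSON.
+curl --header "PRIVATE-TOKEN: <your_access_token>" \
+  https://<gitlab.example.com>/api/v4/user
+----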
diff --git a/downstream/modules/platform/ref-controller-credential-hashiCorp-secret.adoc b/downstream/modules/platform/ref-controller-credential-hashiCorp-secret.adoc index 0856907152..89d12896a3 100644 --- a/downstream/modules/platform/ref-controller-credential-hashiCorp-secret.adoc +++ b/downstream/modules/platform/ref-controller-credential-hashiCorp-secret.adoc @@ -4,4 +4,4 @@ This is considered part of the secret management capability. -For more information, see xref:ref-hashicorp-vault-lookup[HashiCorp Vault Secret Lookup]. \ No newline at end of file +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/configuring_automation_execution/assembly-controller-secret-management#ref-hashicorp-vault-lookup[HashiCorp Vault Secret Lookup]. \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-credential-hashiCorp-vault.adoc b/downstream/modules/platform/ref-controller-credential-hashiCorp-vault.adoc index dde92210cf..dc7347f240 100644 --- a/downstream/modules/platform/ref-controller-credential-hashiCorp-vault.adoc +++ b/downstream/modules/platform/ref-controller-credential-hashiCorp-vault.adoc @@ -4,4 +4,4 @@ This is considered part of the secret management capability. -For more information, see xref:ref-hashicorp-signed-ssh[HashiCorp Vault Signed SSH]. \ No newline at end of file +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/configuring_automation_execution/assembly-controller-secret-management#ref-hashicorp-signed-ssh[HashiCorp Vault Signed SSH]. \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-credential-machine.adoc b/downstream/modules/platform/ref-controller-credential-machine.adoc index 8b36c1f33d..48581ec7e5 100644 --- a/downstream/modules/platform/ref-controller-credential-machine.adoc +++ b/downstream/modules/platform/ref-controller-credential-machine.adoc @@ -28,9 +28,9 @@ In these cases, a dialog opens when the job is launched, prompting the user to e * *Privilege Escalation Method*: Specifies the type of escalation privilege to assign to specific users. This is the same as specifying the `--become-method=BECOME_METHOD` parameter, where `BECOME_METHOD` is any of the existing methods, or a custom method you have written. Begin entering the name of the method, and the appropriate name auto-populates. - ++ //image:credentials-create-machine-credential-priv-escalation.png[image] - ++ ** *empty selection*: If a task or play has `become` set to `yes` and is used with an empty selection, then it will default to `sudo`. ** *sudo*: Performs single commands with superuser (root user) privileges. ** *su*: Switches to the superuser (root user) account (or to other user accounts). diff --git a/downstream/modules/platform/ref-controller-credential-network.adoc b/downstream/modules/platform/ref-controller-credential-network.adoc index 9266b6e47e..a8540ff9a0 100644 --- a/downstream/modules/platform/ref-controller-credential-network.adoc +++ b/downstream/modules/platform/ref-controller-credential-network.adoc @@ -30,8 +30,8 @@ Provide the following information for network credentials: * *Password*: The password to use in conjunction with the network device. * *SSH Private Key*: Copy or drag-and-drop the actual SSH Private Key to be used to authenticate the user to the network through SSH. * *Private Key Passphrase*: The passphrase for the private key to authenticate the user to the network through SSH.
-* *Authorize*: Select this from the Options field to control whether or not to enter privileged mode. -* If *Authorize* is checked, enter a password in the *Authorize Password* field to access privileged mode. +* *Authorize*: Select this to control whether to enter privileged mode. +** If *Authorize* is checked, enter a password in the *Authorize Password* field to access privileged mode. For more information, see link:https://www.ansible.com/blog/porting-ansible-network-playbooks-with-new-connection-plugins[Porting Ansible Network Playbooks with New Connection Plugins]. diff --git a/downstream/modules/platform/ref-controller-credential-openShift.adoc b/downstream/modules/platform/ref-controller-credential-openShift.adoc index e039916c6b..6dd09f029b 100644 --- a/downstream/modules/platform/ref-controller-credential-openShift.adoc +++ b/downstream/modules/platform/ref-controller-credential-openShift.adoc @@ -4,17 +4,17 @@ Select this credential type to create instance groups that point to a Kubernetes or OpenShift container. -For more information, see link:https://docs.ansible.com/automation-controller/4.4/html/administration/containers_instance_groups.html#ag-ext-exe-env[Container and Instance Groups] in the _{ControllerAG}_. +For more information, see xref:controller-instance-and-container-groups[Instance and container groups]. //image:credentials-create-containers-credential.png[Credentials- create Containers credential] Provide the following information for container credentials: * *OpenShift or Kubernetes API Endpoint* (required): The endpoint used to connect to an OpenShift or Kubernetes container. -* *API Authentication Bearer Token* (required): The token used to authenticate the connection. +* *API authentication bearer token* (required): The token used to authenticate the connection. * Optional: *Verify SSL*: You can check this option to verify the server's SSL/TLS certificate is valid and trusted. Environments that use internal or private _Certificate Authority_ (CA) must leave this option unchecked to disable verification. -* *Certificate Authority Data*: Include the `BEGIN CERTIFICATE` and `END CERTIFICATE` lines when pasting the certificate, if provided. +* *Certificate Authority data*: Include the `BEGIN CERTIFICATE` and `END CERTIFICATE` lines when pasting the certificate, if provided. A container group is a type of instance group that has an associated credential that enables connection to an OpenShift cluster. To set up a container group, you must have the following items: diff --git a/downstream/modules/platform/ref-controller-credential-openStack.adoc b/downstream/modules/platform/ref-controller-credential-openStack.adoc index 344a55a6ec..f1c4219867 100644 --- a/downstream/modules/platform/ref-controller-credential-openStack.adoc +++ b/downstream/modules/platform/ref-controller-credential-openStack.adoc @@ -6,14 +6,15 @@ Select this credential type to enable synchronization of cloud inventory with Op //image:credentials-create-openstack-credential.png[Credentials- create OpenStack credential] -Provide the following information for OpenStack credentials: +Enter the following information for OpenStack credentials: * *Username*: The username to use to connect to OpenStack. * *Password (API Key)*: The password or API key to use to connect to OpenStack. * *Host (Authentication URL)*: The host to be used for authentication. * *Project (Tenant Name)*: The Tenant name or Tenant ID used for OpenStack. This value is usually the same as the username.
-* Optional: *Project (Domain Name)*: Provide the project name associated with your domain. -* Optional: *Domain name*: Provide the FQDN to be used to connect to OpenStack. +* Optional: *Project (Domain Name)*: Enter the project name associated with your domain. +* Optional: *Domain Name*: Enter the FQDN to be used to connect to OpenStack. +* Optional: *Region Name*: Enter the region name. For some cloud providers, such as OVH, the region must be specified. If you are interested in using OpenStack Cloud Credentials, see xref:controller-cloud-credentials[Use Cloud Credentials with a cloud inventory], which includes a sample playbook. \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-credential-terraform.adoc index 3a2e8306b3..e8917ee4b8 100644 --- a/downstream/modules/platform/ref-controller-credential-terraform.adoc +++ b/downstream/modules/platform/ref-controller-credential-terraform.adoc @@ -7,20 +7,19 @@ Terraform is a HashiCorp tool used to automate various infrastructure tasks. Select this credential type to enable synchronization with the Terraform inventory source. -The Terraform credential requires the *Backend configuration* attribute which must contain the data from a link:https://developer.hashicorp.com/terraform/language/settings/backends/configuration[Terraform backend block]. -You can paste, drag a file, browse to upload a file, or click the image:leftkey.png[Key,15,15] icon to populate the field from an external xref:assembly-controller-secret-management[Secret Management System]. +The Terraform credential requires the *Backend configuration* attribute, which must contain the data from a link:https://developer.hashicorp.com/terraform/language/backend[Terraform backend block]. +You can paste, drag a file, browse to upload a file, or click the image:leftkey.png[Key,15,15] icon to populate the field from an external link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-secret-management[Secret Management System]. Terraform backend configuration requires the following inputs: * *Name* * Credential type: Select *Terraform backend configuration*. * Optional: *Organization* -* Optional: *Description* -//Not yet available in test env. +* Optional: *Description* * *Backend configuration*: Drag a file here or browse to upload. - ++ Example configuration for an S3 backend: - ++ ---- bucket = "my-terraform-state-bucket" key = "path/to/terraform-state-file" @@ -28,3 +27,5 @@ region = "us-east-1" access_key = "my-aws-access-key" secret_key = "my-aws-secret-access-key" ---- ++ +* Optional: *Google Cloud Platform account credentials* diff --git a/downstream/modules/platform/ref-controller-credential-thycotic-server.adoc b/downstream/modules/platform/ref-controller-credential-thycotic-server.adoc index b6d5e18f92..4d15ec928f 100644 --- a/downstream/modules/platform/ref-controller-credential-thycotic-server.adoc +++ b/downstream/modules/platform/ref-controller-credential-thycotic-server.adoc @@ -4,4 +4,4 @@ This is considered part of the secret management capability. -For more information, see xref:ref-thycotic-secret-server[Thycotic Secret Server]. \ No newline at end of file +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/configuring_automation_execution/assembly-controller-secret-management#ref-thycotic-secret-server[Thycotic Secret Server].
\ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-credential-thycotic-vault.adoc b/downstream/modules/platform/ref-controller-credential-thycotic-vault.adoc index e5eb58a3be..c94a977fdc 100644 --- a/downstream/modules/platform/ref-controller-credential-thycotic-vault.adoc +++ b/downstream/modules/platform/ref-controller-credential-thycotic-vault.adoc @@ -4,4 +4,4 @@ This is considered part of the secret management capability. -For more information, see xref:ref-thycotic-devops-vault[Thycotic DevOps Secrets Vault]. \ No newline at end of file +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/configuring_automation_execution/assembly-controller-secret-management#ref-thycotic-devops-vault[Thycotic DevOps Secrets Vault]. \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-credential-types.adoc b/downstream/modules/platform/ref-controller-credential-types.adoc index 071d95a60a..9eb6687e23 100644 --- a/downstream/modules/platform/ref-controller-credential-types.adoc +++ b/downstream/modules/platform/ref-controller-credential-types.adoc @@ -6,6 +6,10 @@ * xref:ref-controller-credential-aws[Amazon Web Services] * xref:ref-controller-credential-galaxy-hub[{Galaxy}/Automation Hub API Token] +//added AWS Secrets Manager Lookup +* xref:ref-controller-aws-secrets-lookup[AWS Secrets Manager Lookup] +//added Bitbucket Data Center HTTP Access Token +* xref:ref-controller-credential-bitbucket[Bitbucket Data Center HTTP Access Token] * xref:ref-controller-credential-centrify-vault[Centrify Vault Credential Provider Lookup] * xref:ref-controller-credential-container-registry[Container Registry] * xref:ref-controller-credential-cyberark-central[CyberArk Central Credential Provider Lookup] @@ -27,11 +31,12 @@ * xref:ref-controller-credential-satellite[Red Hat Satellite 6] * xref:ref-controller-credential-virtualization[Red Hat Virtualization] * xref:ref-controller-credential-source-control[Source Control] +* xref:ref-controller-credential-terraform[Terraform Backend Configuration] * xref:ref-controller-credential-thycotic-vault[Thycotic DevOps Secrets Vault] * xref:ref-controller-credential-thycotic-server[Thycotic Secret Server] * xref:ref-controller-credential-vault[Vault] * xref:ref-controller-credential-vmware-vcenter[VMware vCenter] -The credential types associated with Centrify, CyberArk, HashiCorp Vault, {Azure} Key Vault, and Thycotic are part of the credential plugins capability that enables an external system to lookup your secrets information. +The credential types associated with AWS Secrets Manager, Centrify, CyberArk, HashiCorp Vault, {Azure} Key Vault, and Thycotic are part of the credential plugins capability that enables an external system to look up your secrets information. -For more information, see xref:assembly-controller-secret-management[Secrets Management System]. \ No newline at end of file +For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/configuring_automation_execution/assembly-controller-secret-management[Secrets Management System].
\ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-credential-vault.adoc index 6b2cb2ee1f..c920b3f16f 100644 --- a/downstream/modules/platform/ref-controller-credential-vault.adoc +++ b/downstream/modules/platform/ref-controller-credential-vault.adoc @@ -8,11 +8,11 @@ Select this credential type to enable synchronization of inventory with Ansible Vault credentials require the *Vault Password* and an optional *Vault Identifier* if applying multi-Vault credentialing. -For more information on the Multi-Vault support, refer to the link:https://docs.ansible.com/automation-controller/latest/html/administration/multi-creds-assignment.html#multi-vault-credentials[Multi-Vault Credentials] section of the _{ControllerAG}_. +// For more information about the Multi-Vault support, see the link:https://docs.ansible.com/automation-controller/latest/html/administration/multi-creds-assignment.html#multi-vault-credentials[Multi-Vault Credentials] section of _{ControllerAG}_. You can configure {ControllerName} to ask the user for the password at launch time by selecting *Prompt on launch*. -When you select *Prompt on Launch*, a dialog opens when the job is launched, prompting the user to enter the password. +When you select *Prompt on launch*, a dialog opens when the job is launched, prompting the user to enter the password. [WARNING] ==== diff --git a/downstream/modules/platform/ref-controller-credentials-getting-started.adoc index 67003e71c6..72edf44288 100644 --- a/downstream/modules/platform/ref-controller-credentials-getting-started.adoc +++ b/downstream/modules/platform/ref-controller-credentials-getting-started.adoc @@ -2,7 +2,7 @@ = Getting started with credentials //[ddacosta] This should really be rewritten as a procedure because it includes steps. -From the navigation panel, select {MenuAMCredentials} to access the *Credentials* page. +From the navigation panel, select {MenuAECredentials} to access the *Credentials* page. image:credentials-demo-edit-details.png[Credentials] @@ -38,7 +38,7 @@ A credential with roles associated retains them if the credential is reassigned Click btn:[Add] to assign the *Demo Credential* to additional users. If no users exist, add them by selecting {MenuControllerUsers} from the navigation panel. -For more information, see xref:assembly-controller-users[Users]. +For more information, see link:{URLCentralAuth}/gw-managing-access[Managing access with role based access control]. Select the *Job Templates* tab to display the job templates associated with this credential, and which jobs have run recently using this credential. diff --git a/downstream/modules/platform/ref-controller-existing-security.adoc index aed944b5e5..9c8c9ae578 100644 --- a/downstream/modules/platform/ref-controller-existing-security.adoc +++ b/downstream/modules/platform/ref-controller-existing-security.adoc @@ -7,4 +7,4 @@ Use {ControllerName}'s role-based access control (RBAC) to delegate the minimum Use teams in {ControllerName} to assign permissions to groups of users rather than to users individually. .Additional resources -For more information, see link:https://docs.ansible.com/automation-controller/4.4/html/userguide/security.html#rbac-ug[Role-Based Access Controls] in the _{ControllerUG}_.
+For more information, see link:https://docs.ansible.com/automation-controller/4.4/html/userguide/security.html#rbac-ug[Role-Based Access Controls] in _{ControllerUG}_. diff --git a/downstream/modules/platform/ref-controller-export-old-scripts.adoc index 978e18c9c1..cdee9db91f 100644 --- a/downstream/modules/platform/ref-controller-export-old-scripts.adoc +++ b/downstream/modules/platform/ref-controller-export-old-scripts.adoc @@ -67,5 +67,5 @@ $ ansible-inventory -i ./my_scripts/_11__inventory_script_upperorder --list --ex In the preceding example, you can `cd` into `my_scripts` and then issue a `git init` command, add the scripts you want, push it to source control, and then create an SCM inventory source in the user interface. -For more information on syncing or using custom inventory scripts, see link:https://docs.ansible.com/automation-controller/4.4/html/administration/scm-inv-source.html#ag-inv-import[Inventory file importing] in the _{ControllerAG}_. +For more information about syncing or using custom inventory scripts, see link:{URLControllerAdminGuide}/assembly-inventory-file-importing[Inventory file importing] in _{ControllerAG}_. diff --git a/downstream/modules/platform/ref-controller-extra-variables.adoc index e9b8abee54..e224f939d3 100644 --- a/downstream/modules/platform/ref-controller-extra-variables.adoc +++ b/downstream/modules/platform/ref-controller-extra-variables.adoc @@ -25,7 +25,7 @@ It is possible that this variable, `debug = true`, can be overridden in a job te To ensure the variables that you pass are not overridden, ensure they are included by redefining them in the survey. You can define extra variables at the inventory, group, and host levels. -If you are specifying the `ALLOW_JINJA_IN_EXTRA_VARS` parameter, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#ref-controller-allow-jinja-in-extra-vars[The ALLOW_JINJA_IN_EXTRA_VARS variable] section of the _{ControllerAG}_ to configure it. +If you are specifying the `ALLOW_JINJA_IN_EXTRA_VARS` parameter, see the link:{URLControllerAdminGuide}/controller-tips-and-tricks#ref-controller-allow-jinja-in-extra-vars[The ALLOW_JINJA_IN_EXTRA_VARS variable] section of _{ControllerAG}_ to configure it. The job template extra variables dictionary is merged with the survey variables. diff --git a/downstream/modules/platform/ref-controller-git-refspec.adoc index 3f93687563..43f4c13a47 100644 --- a/downstream/modules/platform/ref-controller-git-refspec.adoc +++ b/downstream/modules/platform/ref-controller-git-refspec.adoc @@ -2,26 +2,26 @@ = Git Refspec -The *SCM Refspec* field specifies which extra references the update should download from the remote. +The *Source control refspec* field specifies which extra references the update should download from the remote. Examples include the following: * `refs/*:refs/remotes/origin/*`: This fetches all references, including remotes of the remote * `refs/pull/*:refs/remotes/origin/pull/*` (GitHub-specific): This fetches all refs for all pull requests * `refs/pull/62/head:refs/remotes/origin/pull/62/head`: This fetches the ref for one GitHub pull request -For large projects, consider performance impact when using the first or second previous examples.
+For large projects, consider the performance impact of using the first or second example. -The *SCM Refspec* parameter affects the availability of the project branch, and can enable access to references not otherwise available. -The previous examples enable you to supply a pull request from the *SCM Branch*, which is not possible without the *SCM Refspec* field. +The *Source control refspec* parameter affects the availability of the project branch, and can enable access to references not otherwise available. +Use the earlier examples to supply a pull request from the *Source control branch*, which is not possible without the *Source control refspec* field. The Ansible git module fetches `refs/heads/` by default. -This means that a project's branches, tags and commit hashes, can be used as the *SCM Branch* if *SCM Refspec* is blank. -The value specified in the *SCM Refspec* field affects which *SCM Branch* fields can be used as overrides. +This means that you can use a project's branches, tags, and commit hashes as the *Source control branch* if *Source control refspec* is blank. +The value specified in the *Source control refspec* field affects which *Source control branch* fields can be used as overrides. Project updates (of any type) perform an extra `git fetch` command to pull that refspec from the remote. .Example You can set up a project that enables branch override with the first or second refspec example. -Use this in a job template that prompts for the *SCM Branch*. +Use this in a job template that prompts for the *Source control branch*. A client can then launch the job template when a new pull request is created, providing the branch `pull/N/head` and the job template can run against the provided GitHub pull request reference. .Additional resources diff --git a/downstream/modules/platform/ref-controller-group-name-vars-filtering.adoc index caef96e8a5..d0a8c25ed8 100644 --- a/downstream/modules/platform/ref-controller-group-name-vars-filtering.adoc +++ b/downstream/modules/platform/ref-controller-group-name-vars-filtering.adoc @@ -55,7 +55,7 @@ groups: `limit`: `is_shutdown:&product_dev` + This constructed inventory input creates a group for both categories and uses the `limit` (host pattern) to only return hosts that -are in the intersection of those two groups, which is documented in link:https://docs.ansible.com/ansible/latest/inventory_guide/intro_patterns.html[Patterns:targeting hosts and groups]. +are in the intersection of those two groups, which is documented in link:https://docs.ansible.com/ansible/latest/inventory_guide/intro_patterns.html[Patterns: targeting hosts and groups]. + When a variable is or is not defined (depending on the host), you can give a default. For example, use `| default("running")` if you know what value it should have when it is not defined. diff --git a/downstream/modules/platform/ref-controller-host-details.adoc index 1298a09e80..d9ee352097 100644 --- a/downstream/modules/platform/ref-controller-host-details.adoc +++ b/downstream/modules/platform/ref-controller-host-details.adoc @@ -1,11 +1,55 @@ [id="controller-host-details"] -= Host Details += Hosts -The *Host Details* window displays the following information about the host affected by the selected event and its associated play and task: +//Does this need to be a procedure or can it be left a ref.
+ +A host is a system managed by {PlatformNameShort}, and can be a physical server, a virtual machine, a cloud-based server, or another device. +Typically, a host is an operating system instance. +Hosts are grouped in inventories and are sometimes referred to as “nodes”. + +Ansible works against multiple managed nodes or “hosts” in your infrastructure at the same time, using a list or group of lists known as an inventory. + +After your inventory is defined, you use patterns to select the hosts or groups that you want Ansible to run against. + +== Viewing the host details + +To view the host details for a job run: + +.Procedure + +From the navigation panel, select {MenuInfrastructureHosts}. +The *Hosts* page displays the following information for each host: * The *Host*. -* The *Status*. +* The *Description*. +* The *Inventory* associated with that host. + +Selecting a particular host displays the *Details* page for that host. + +== Creating a host + +To create a new host: + +.Procedure + +. From the navigation panel, select {MenuInfrastructureHosts}. +. Click btn:[Create host]. +. On the *Create host* page, enter the following information: ++ +* *Name*: Enter a name for the host. +* Optional: *Description*: Enter a description for the host. +* *Inventory*: Select the inventory to contain that host from the list. +* *Variables*: Enter the inventory file variables associated with the host. + +. Click btn:[Create host] to save your changes. + +For a job run, the host details also display the following information about the associated play and task: + * The type of run in the *Play* field. * The type of *Task*. * If applicable, the Ansible Module task, and any arguments for that module. diff --git a/downstream/modules/platform/ref-controller-install-builder.adoc index c6c730b63c..b09f2ee519 100644 --- a/downstream/modules/platform/ref-controller-install-builder.adoc +++ b/downstream/modules/platform/ref-controller-install-builder.adoc @@ -6,4 +6,6 @@ To build images, you must have Podman or Docker installed, along with the `ansib The `--container-runtime` option must correspond to the Podman or Docker executable you intend to use. -For more information, see link:https://ansible.readthedocs.io/projects/builder/en/latest/#quickstart-for-ansible-builder[Quickstart for Ansible Builder], or link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/creating_and_consuming_execution_environments/index[Creating and consuming execution environments]. +When you build an {ExecEnvShort} image, the image must support the architecture on which {PlatformNameShort} is deployed. + +For more information, see link:https://ansible.readthedocs.io/projects/builder/en/latest/#quickstart-for-ansible-builder[Quickstart for Ansible Builder], or link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/creating_and_using_execution_environments/index[Creating and using execution environments].
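+
+As a minimal sketch of the build workflow (the base image and collection are illustrative; use a base image that is available to you and that matches your deployment architecture):
+
+[literal, options="nowrap" subs="+attributes"]
+----
+# execution-environment.yml
+version: 3
+images:
+  base_image:
+    name: registry.redhat.io/ansible-automation-platform-25/ee-minimal-rhel9:latest
+dependencies:
+  galaxy:
+    collections:
+      - ansible.posix
+----
+
+Then build and tag the image with the runtime you have installed:
+
+[literal, options="nowrap" subs="+attributes"]
+----
+$ ansible-builder build --tag my_ee --container-runtime podman
+----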
diff --git a/downstream/modules/platform/ref-controller-instance-group-capacity.adoc index d5cd78d11c..30890eef74 100644 --- a/downstream/modules/platform/ref-controller-instance-group-capacity.adoc +++ b/downstream/modules/platform/ref-controller-instance-group-capacity.adoc @@ -30,4 +30,5 @@ For container groups, using the `max_forks` value is useful given that all jobs The default `pod_spec` sets requests and not limits, so the pods can "burst" above their requested value without being throttled or reaped. By setting the `max_forks value`, you can help prevent a scenario where too many jobs with large forks values get scheduled concurrently and cause the OpenShift nodes to be oversubscribed with multiple pods using more resources than their requested value. -To set the maximum values for the concurrent jobs and forks in an instance group, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-create-instance-group[Creating an instance group] in the _{ControllerUG}_. +To set the maximum values for the concurrent jobs and forks in an instance group, see +xref:controller-create-instance-group[Creating an instance group]. diff --git a/downstream/modules/platform/ref-controller-instance-group-policies.adoc index fa11ad53d3..e1b7f408b7 100644 --- a/downstream/modules/platform/ref-controller-instance-group-policies.adoc +++ b/downstream/modules/platform/ref-controller-instance-group-policies.adoc @@ -20,4 +20,4 @@ image::ug-instance-groups_list_view.png[Instance Groups list view] .Additional resources -For more information, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-instance-groups[Managing Instance Groups] section of the _{ControllerUG}_. \ No newline at end of file +For more information, see the xref:controller-instance-groups[Managing Instance Groups] section. \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-internal-services.adoc index 0ecdff36fa..2fa2dd8812 100644 --- a/downstream/modules/platform/ref-controller-internal-services.adoc +++ b/downstream/modules/platform/ref-controller-internal-services.adoc @@ -6,7 +6,7 @@ PostgreSQL database:: The connection to the PostgreSQL database is done by password authentication over TCP, either through localhost or remotely (external database). -This connection can use PostgreSQL's built in support for SSL/TLS, as natively configured by the installer support. +This connection can use PostgreSQL's built-in support for SSL/TLS, as configured natively by the installer. SSL/TLS protocols are configured by the default OpenSSL configuration.
A Redis key or value store:: diff --git a/downstream/modules/platform/ref-controller-inventory-plugins.adoc index a4dd59ad2a..143e750d8a 100644 --- a/downstream/modules/platform/ref-controller-inventory-plugins.adoc +++ b/downstream/modules/platform/ref-controller-inventory-plugins.adoc @@ -15,6 +15,7 @@ In {Controllername} v4.4, you can provide the inventory plugin configuration dir * xref:proc-controller-inv-source-rh-virt[Red Hat Virtualization] * xref:proc-controller-inv-source-aap[{PlatformName}] * xref:proc-controller-inv-source-terraform[Terraform State] +* xref:proc-controller-inv-source-open-shift-virt[OpenShift Virtualization] Newly created configurations for inventory sources contain the default plugin configuration values. If you want your newly created inventory sources to match the output of a legacy source, you must apply a specific set of configuration values for that source. diff --git a/downstream/modules/platform/ref-controller-isolation-functionality.adoc index 7e0b554ab6..de84442a7b 100644 --- a/downstream/modules/platform/ref-controller-isolation-functionality.adoc +++ b/downstream/modules/platform/ref-controller-isolation-functionality.adoc @@ -30,4 +30,4 @@ If your playbooks need to use keys or settings defined in `AWX_ISOLATION_SHOW_PA The fields described here can be found on the *Jobs settings* page: -image:configure-tower-jobs-isolated-jobs-fields.png[image] \ No newline at end of file +image::job-settings-full.png[Jobs settings options] \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-licenses.adoc index 46dc100524..c37ba42f78 100644 --- a/downstream/modules/platform/ref-controller-licenses.adoc +++ b/downstream/modules/platform/ref-controller-licenses.adoc @@ -2,7 +2,7 @@ = Component licenses -To view the license information for the components included in {ControllerName}, see `/usr/share/doc/automation-controller-<version>/README`. +To view the license information for the components included in {PlatformNameShort}, see `/usr/share/doc/automation-controller-<version>/README`, where `<version>` refers to the version of {ControllerName} you have installed. diff --git a/downstream/modules/platform/ref-controller-logging-settings.adoc new file mode 100644 index 0000000000..8dd4b80659 --- /dev/null +++ b/downstream/modules/platform/ref-controller-logging-settings.adoc @@ -0,0 +1,6 @@ +[id="ref-controller-logging-settings"] + += Logging and aggregation settings + + +For information about these settings, see xref:proc-controller-set-up-logging[Setting up logging].
\ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-logging-splunk.adoc index 373459953b..b35a3c5fcd 100644 --- a/downstream/modules/platform/ref-controller-logging-splunk.adoc +++ b/downstream/modules/platform/ref-controller-logging-splunk.adoc @@ -34,6 +34,6 @@ The Splunk HTTP Event Collector listens on port 8088 by default, so you must pro Typical values are shown in the following example: -image:logging-splunk-tower-example.png[Splunk logging example] +image:logging-splunk-controller-example.png[Splunk logging example] For more information on configuring the HTTP Event Collector, see the link:https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector[Splunk documentation]. \ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-node-counting.adoc index 011985ee69..d60e5f125a 100644 --- a/downstream/modules/platform/ref-controller-node-counting.adoc +++ b/downstream/modules/platform/ref-controller-node-counting.adoc @@ -2,11 +2,11 @@ = Node counting in licenses -The {ControllerName} license defines the number of Managed Nodes that can be managed as part of a {PlatformName} subscription. +The {PlatformNameShort} license defines the number of Managed Nodes that you can manage as part of your subscription. A typical license says "License Count: 500", which sets the maximum number of Managed Nodes at 500. -For more information about managed node requirements for licensing, see https://access.redhat.com/articles/3331481. +For more information about managed node requirements for licensing, see link:https://access.redhat.com/articles/3331481[How are "managed nodes" defined as part of the {PlatformName} offering]. [NOTE] ==== diff --git a/downstream/modules/platform/ref-controller-old-job-history.adoc index 92ac387ae8..13212b045e 100644 --- a/downstream/modules/platform/ref-controller-old-job-history.adoc +++ b/downstream/modules/platform/ref-controller-old-job-history.adoc @@ -19,4 +19,4 @@ jobs. For more information, see xref:proc-controller-scheduling-deletion[Scheduling deletion]. -You can also set or review notifications associated with this management job in the same way as described in xref:proc-controller-management-notifications[Notifications] for activity stream management jobs, or for more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-notifications[Notifications] in the _{ControllerUG}_. +You can also set or review notifications associated with this management job in the same way as described in xref:proc-controller-management-notifications[Notifications] for activity stream management jobs, or for more information, see link:{URLControllerUserGuide}/controller-notifications[Notifiers] in _{ControllerUG}_.
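+
+As a companion to the Splunk configuration shown earlier, a quick, illustrative way to confirm that the HTTP Event Collector is reachable before you enable log forwarding (the hostname and token are placeholders):
+
+[literal, options="nowrap" subs="+attributes"]
+----
+# A working collector responds with {"text":"Success","code":0}.
+curl -k https://<splunk.example.com>:8088/services/collector/event \
+  -H "Authorization: Splunk <hec_token>" \
+  -d '{"event": "connectivity test"}'
+----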
diff --git a/downstream/modules/platform/ref-controller-organization-mapping.adoc index f6b8092068..21957bf32e 100644 --- a/downstream/modules/platform/ref-controller-organization-mapping.adoc +++ b/downstream/modules/platform/ref-controller-organization-mapping.adoc @@ -1,55 +1,25 @@ +:_mod-docs-content-type: PROCEDURE + [id="ref-controller-organization-mapping"] = Organization mapping -You must control which users are placed into which {ControllerName} organizations based on their username and email address (distinguishing your organization administrators and users from social or enterprise-level authentication accounts). - -Dictionary keys are organization names. -Organizations are created, if not already present, and if the license permits multiple organizations. -Otherwise, the single default organization is used regardless of the key. - -Values are dictionaries defining the options for each organization's membership. -For each organization, you can specify which users are automatically users of the organization and also which users can administer the organization. - -*admins*: None, True/False, string or list/tuple of strings: - -* If *None*, organization administrators are not updated. -* If *True*, all users using account authentication are automatically added as administrators of the organization. -* If *False*, no account authentication users are automatically added as administrators of the organization. -* If a string or list of strings, specifies the usernames and emails for users to be added to the organization, strings beginning and ending with `/` are compiled into regular expressions. -The modifiers `i` (case-insensitive) and `m` (multi-line) can be specified after the ending `/`. - -*remove_admins*: True/False. Defaults to *True*: - -* When *True*, a user who does not match is removed from the organization's administrative list. -* *users*: None, True/False, string or list/tuple of strings. The same rules apply as for *admins*. -* *remove_users*: True/False. Defaults to *True*. The same rules apply as for *remove_admins*. - -[literal, options="nowrap" subs="+attributes"] ---- -{ - "Default": { - "users": true - }, - "Test Org": { - "admins": ["admin@example.com"], - "users": true - }, - "Test Org 2": { - "admins": ["admin@example.com", "/^controller-[^@]+?@.*$/i"], - "users": "/^[^@].*?@example\\.com$/" - } -} ---- - -Organization mappings can be specified separately for each account authentication backend. -If defined, these configurations take precedence over the global configuration above. - -[literal, options="nowrap" subs="+attributes"] ---- -SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP = {} -SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP = {} -SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP = {} -SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP = {} -SOCIAL_AUTH_SAML_ORGANIZATION_MAP = {} ----- \ No newline at end of file +You can control which users are placed into which {PlatformNameShort} organizations based on attributes such as their username and email address, or based on groups provided by an authenticator. + +When organization mapping is positively evaluated, the specified organization is created, if it does not already exist and the authenticator tied to the map is allowed to create objects. + +.Procedure + +. After configuring the authentication details for your authentication type, select *Organization* from the *Add authentication mapping* list. +. Enter a unique rule *Name* to identify the rule. +.
Select a *Trigger* from the list. See xref:gw-authenticator-map-triggers[Authenticator map triggers] for more information about map triggers. +. Select *Revoke* to deny user access to the system when none of the trigger conditions are matched. +. Select the *Organization* to which matching users are added or blocked. +. Select a *Role* to be applied or removed for matching users (for example, *Organization Admin* or *Organization Member*). +. Click btn:[Next]. + +[role="_additional-resources"] +.Next steps +include::snippets/snip-gw-mapping-next-steps.adoc[] + + diff --git a/downstream/modules/platform/ref-controller-organization-notifications.adoc index ebeeae3994..2f2abdf67b 100644 --- a/downstream/modules/platform/ref-controller-organization-notifications.adoc +++ b/downstream/modules/platform/ref-controller-organization-notifications.adoc @@ -1,14 +1,17 @@ -[id="red-controller-oganization-notifications"] +:_mod-docs-content-type: REFERENCE -= Work with Notifications +[id="ref-controller-organization-notifications"] -Selecting the *Notifications* tab on the Organization details page enables you to review any notification integrations you have set up. += Working with notifiers -image:organizations-notifications-samples-list.png[Notifications] +When {ControllerName} is enabled on the platform, you can review any notifier integrations you have set up and manage their settings within the organization resource. -Use the toggles to enable or disable the notifications to use with your particular organization. -For more information, see xref:controller-enable-disable-notifications[Enable and Disable Notifications]. +.Procedure +. From the navigation panel, select {MenuAMOrganizations}. +. From the *Organizations* list view, select the organization for which you want to manage notifications. +//ddacosta - this might change to Notifiers tab. +. Select the *Notification* tab. +. Use the toggles to enable or disable the notifications to use with your particular organization. For more information, see link:{URLControllerUserGuide}/controller-notifications#controller-enable-disable-notifications[Enable and disable notifications]. +. If no notifiers have been set up, select {MenuAEAdminJobNotifications} from the navigation panel. -If no notifications have been set up, select {MenuAEAdminJobNotifications} from the navigation panel. - -For information on configuring notification types, see xref:controller-notification-types[Notification Types]. +For information about configuring notification types, see link:{URLControllerUserGuide}/controller-notifications#controller-notification-types[Notification types]. diff --git a/downstream/modules/platform/ref-controller-performance-troubleshooting.adoc index 6b4ce0b3f8..35e79ae1b3 100644 --- a/downstream/modules/platform/ref-controller-performance-troubleshooting.adoc +++ b/downstream/modules/platform/ref-controller-performance-troubleshooting.adoc @@ -12,13 +12,13 @@ * Job output streams from the execution node where the ansible-playbook is actually run to the associated control node. Then the callback receiver serializes this data and writes it to the database. Relevant settings to observe and tune can be found in xref:ref-controller-settings-job-events[Settings for managing job event processing] and xref:ref-controller-database-settings[PostgreSQL database configuration and maintenance for {ControllerName}].
* In general, to resolve this symptom it is important to observe the CPU and memory use of the control nodes. If CPU or memory use is very high, you can either horizontally scale the control plane by deploying more virtual machines to be control nodes that naturally spreads out work more, or to modify the number of jobs a control node will manage at a time. For more information, see xref:ref-controller-settings-control-execution-nodes[Capacity settings for control and execution nodes] for more information. -*What can I do to increase the number of jobs that {ControllerName} can run concurrently?* +*What can you do to increase the number of jobs that {ControllerName} can run concurrently?* * Factors that cause jobs to remain in “pending” state are: ** *Waiting for “dependencies” to finish*: this includes project updates and inventory updates when “update on launch” behavior is enabled. ** *The “allow_simultaneous” setting of the job template*: if multiple jobs of the same job template are in “pending” status, check the “allow_simultaneous” setting of the job template (“Concurrent Jobs” checkbox in the UI). If this is not enabled, only one job from a job template can run at a time. ** *The “forks” value of your job template*: the default value is 5. The amount of capacity required to run the job is roughly the forks value (some small overhead is accounted for). If the forks value is set to a very large number, this will limit what nodes will be able to run it. -** *Lack of either control or execution capacity*: see “awx_instance_remaining_capacity” metric from the application metrics available on /api/v2/metrics. See xref:ref-controller-metrics-monitoring[Metrics for monitoring {ControllerName} application] for more information about how to monitor metrics. See xref:ref-controller-capacity-planning[Capacity planning for deploying {ControllerName}] for information on how to plan your deployment to handle the number of jobs you are interested in. +** *Lack of either control or execution capacity*: see “awx_instance_remaining_capacity” metric from the application metrics available on /api/v2/metrics. See xref:ref-controller-metrics-monitoring[Metrics for monitoring {ControllerName} application] for more information about how to check metrics. See xref:ref-controller-capacity-planning[Capacity planning for deploying {ControllerName}] for information about how to plan your deployment to handle the number of jobs you are interested in. *Jobs run more slowly on {ControllerName} than on a local machine.* @@ -27,9 +27,10 @@ * Size of projects can impact how long it takes to start the job, as the project is updated on the control node and transferred to the execution node. Internal cluster routing can impact network performance. For more information, see xref:ref-controller-internal-cluster-routing[Internal cluster routing]. * Container pull settings can impact job start time. The {ExecEnvShort} is a container that is used to run jobs within it. Container pull settings can be set to “Always”, “Never” or “If not present”. If the container is always pulled, this can cause delays. -* Ensure that all cluster nodes, including execution, control, and the database, have been deployed in instances with storage rated to the minimum required IOPS, because the manner in which {ControllerName} runs ansible and caches event data implicates significant disk I/O. 
For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/platform-system-requirements#red_hat_ansible_automation_platform_system_requirements[Red Hat Ansible Automation Platform system requirements]. + +* Ensure that all cluster nodes, including execution, control, and the database, have been deployed in instances with storage rated to the minimum required IOPS, because the manner in which {ControllerName} runs Ansible and caches event data involves significant disk I/O. For more information, see link:{URLPlanningGuide}/platform-system-requirements[System requirements]. *Database storage does not stop growing.* -* {ControllerNameStart} has a management job titled “Cleanup Job Details”. By default, it is set to keep 120 days of data and to run once a week. To reduce the amount of data in the database, you can shorten the retention time. For more information, see xref:proc-controller-remove-old-activity-stream[Removing Old Activity Stream Data]. +* {ControllerNameStart} has a management job titled “Cleanup Job Details”. By default, it is set to keep 120 days of data and to run once a week. To reduce the amount of data in the database, you can shorten the retention time. For more information, see xref:proc-controller-remove-old-activity-stream[Removing old activity stream data]. * Running the cleanup job deletes the data in the database. However, the database must at some point perform its vacuuming operation which reclaims storage. See xref:ref-controller-database-settings[PostgreSQL database configuration and maintenance for {ControllerName}] for more information about database vacuuming. diff --git a/downstream/modules/platform/ref-controller-playbook-run-search.adoc index b17c53c21b..42fa68e586 100644 --- a/downstream/modules/platform/ref-controller-playbook-run-search.adoc +++ b/downstream/modules/platform/ref-controller-playbook-run-search.adoc @@ -7,18 +7,18 @@ To filter only certain hosts with a particular status, specify one of the follow ok:: Indicates that a task completed successfully but no change was executed on the host. changed:: The playbook task executed. -Since Ansible tasks should be written to be idempotent, tasks may exit successfully without executing anything on the host. +Because Ansible tasks should be written to be idempotent, tasks can exit successfully without executing anything on the host. In these cases, the task returns *ok*, but not *changed*. failed:: The task failed. Further playbook execution stopped for this host. -unreachable:: The host is unreachable from the network or has another fatal error associated with it. +unreachable:: The host is unreachable from the network or has another unrecoverable error associated with it. skipped:: The playbook task skipped because no change was necessary for the host to reach the target state. rescued:: This shows the tasks that failed and then executes a rescue section. ignored:: This shows the tasks that failed and have `ignore_errors: yes configured`.
-These statuses also display in each *Stdout* pane, in a group of "stats" called the host summary fields: +//These statuses also display in each *Stdout* pane, in a group of "stats" called the host summary fields: -image::ug-job-std-out-host-summary-status.png[Host summary status] +//image::ug-job-std-out-host-summary-status.png[Host summary status] The following example shows a search with only unreachable hosts: diff --git a/downstream/modules/platform/ref-controller-proj-sign-prerequisites.adoc index c5a3697825..01adc8d7bd 100644 --- a/downstream/modules/platform/ref-controller-proj-sign-prerequisites.adoc +++ b/downstream/modules/platform/ref-controller-proj-sign-prerequisites.adoc @@ -8,8 +8,8 @@ + [literal, options="nowrap" subs="+attributes"] ---- -ansible-automation-platform-2.4-for-rhel-8-x86_64-rpms for RHEL 8 -ansible-automation-platform-2.4-for-rhel-9-x86_64-rpms for RHEL 9 +ansible-automation-platform-2.5-for-rhel-8-x86_64-rpms for RHEL 8 +ansible-automation-platform-2.5-for-rhel-9-x86_64-rpms for RHEL 9 ---- * A valid GPG public or private keypair is required for signing content. For more information, see link:https://www.redhat.com/sysadmin/creating-gpg-keypairs[How to create GPG keypairs]. diff --git a/downstream/modules/platform/ref-controller-prompted-vault-credentials.adoc index b98baea532..35e4e5e14e 100644 --- a/downstream/modules/platform/ref-controller-prompted-vault-credentials.adoc +++ b/downstream/modules/platform/ref-controller-prompted-vault-credentials.adoc @@ -32,4 +32,4 @@ POST /api/v2/job_templates/N/launch/ Instead of uploading sensitive credential information into {ControllerName}, you can link credential fields to external systems and use them to run your playbooks. -For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#assembly-controller-secret-management[Secret Management System] in the {ControllerUG}. +For more information, see xref:assembly-controller-secret-management[Secret Management System]. diff --git a/downstream/modules/platform/ref-controller-refresh-existing-token.adoc index 48e8448d1e..eee9ddcf26 100644 --- a/downstream/modules/platform/ref-controller-refresh-existing-token.adoc +++ b/downstream/modules/platform/ref-controller-refresh-existing-token.adoc @@ -19,14 +19,14 @@ The following example shows an existing access token with a refresh token provid } ---- -The `/api/o/token/` endpoint is used for refreshing the access token: +The `/o/token/` endpoint is used for refreshing the access token: [literal, options="nowrap" subs="+attributes"] ---- curl -X POST \ -d "grant_type=refresh_token&refresh_token=AL0NK9TTpv0qp54dGbC4VUZtsZ9r8z" \ -u "gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569eIaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo" \ - http://<controller>/api/o/token/ -i + http://<gateway>/o/token/ -i ---- Where `refresh_token` is provided by `refresh_token` field of the preceding access token. @@ -36,7 +36,7 @@ The authentication information is of format `<client_id>:<client_secret>`, where [NOTE] ==== The special OAuth 2 endpoints only support using the `x-www-form-urlencoded` *Content-type*, so as a result, none of the -`api/o/*` endpoints accept `application/json`.
+`/o/*` endpoints accept `application/json`.
====

On success, a response displays in JSON format containing the new (refreshed) access token with the same scope information as the previous one:

@@ -60,4 +60,4 @@ Strict-Transport-Security: max-age=15768000

The refresh operation replaces the existing token by deleting the original and then immediately creating a new token with the same scope and related application as the original one.
-Verify that the new token is present and the old one is deleted in the `/api/v2/tokens/` endpoint.
+Verify that the new token is present and the old one is deleted in the `gateway/api/v1/tokens/` endpoint.

diff --git a/downstream/modules/platform/ref-controller-revoke-access-token.adoc b/downstream/modules/platform/ref-controller-revoke-access-token.adoc
index 2e5b018bff..612005d6eb 100644
--- a/downstream/modules/platform/ref-controller-revoke-access-token.adoc
+++ b/downstream/modules/platform/ref-controller-revoke-access-token.adoc
@@ -2,7 +2,7 @@

= Revoke an access token

-You can revoke an access token by using the `/api/o/revoke-token/` endpoint.
+You can revoke an access token by deleting the token in the platform UI, or by using the `/o/revoke-token/` endpoint.

Revoking an access token by this method is the same as deleting the token resource object, but it enables you to delete a token by providing its token value, and the associated `client_id` (and `client_secret` if the application is `confidential`). For example:

[literal, options="nowrap" subs="+attributes"]
----
curl -X POST -d "token=rQONsve372fQwuc2pn76k3IHDCYpi7" \
-u "gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569eIaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo" \
-http:///api/o/revoke_token/ -i
+http:///o/revoke_token/ -i
----

[NOTE]
====
* The special OAuth 2 endpoints only support using the `x-www-form-urlencoded` *Content-type*, so as a result, none of the
-`api/o/*` endpoints accept `application/json`.
+`/o/*` endpoints accept `application/json`.

* The *Allow External Users to Create Oauth2 Tokens* (`ALLOW_OAUTH2_FOR_EXTERNAL_USERS` in the API) setting is disabled by default.
External users are users authenticated externally with a service such as LDAP, or any of the other SSO services.
This setting ensures external users cannot create their own tokens.
If you enable and then disable it, any tokens created by external users in the meantime still exist and are not automatically revoked.
+This setting can be configured from the {MenuSetGateway} menu.
====

Alternatively, to revoke OAuth2 tokens, you can use the `manage` utility; see xref:ref-controller-revoke-oauth2-token[Revoke oauth2 tokens].

-This setting can be configured at the system-level in the UI:
-
-image:configure-controller-system-oauth2-tokens-toggle.png[image]
-
On success, a response of `200 OK` is displayed.
-Verify the deletion by checking whether the token is present in the `/api/v2/tokens/` endpoint.
\ No newline at end of file
+Verify the deletion by checking whether the token is present in the `gateway/api/v1/tokens/` endpoint.
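As a sketch of that verification step, the following call lists the remaining tokens so you can confirm the revoked token is gone. The host name and credentials are placeholders, and the endpoint path is taken verbatim from the text above:

[source,bash]
----
# List remaining OAuth2 tokens; a revoked token should no longer appear.
# <gateway-host>, admin, and password are placeholders.
curl -u "admin:password" https://<gateway-host>/gateway/api/v1/tokens/ -i
----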
\ No newline at end of file diff --git a/downstream/modules/platform/ref-controller-revoke-oauth2-token.adoc b/downstream/modules/platform/ref-controller-revoke-oauth2-token.adoc index 888c573fe2..495543eb7e 100644 --- a/downstream/modules/platform/ref-controller-revoke-oauth2-token.adoc +++ b/downstream/modules/platform/ref-controller-revoke-oauth2-token.adoc @@ -11,26 +11,26 @@ To revoke all existing OAuth2 tokens use the following command: [literal, options="nowrap" subs="+attributes"] ---- -$ awx-manage revoke_oauth2_tokens +$ aap-gateway-manage revoke_oauth2_tokens ---- To revoke all OAuth2 tokens and their refresh tokens use the following command: [literal, options="nowrap" subs="+attributes"] ---- -$ awx-manage revoke_oauth2_tokens --revoke_refresh +$ aap-gateway-manage revoke_oauth2_tokens --revoke_refresh ---- To revoke all OAuth2 tokens for the user with `id=example_user` (specify the username for `example_user`): [literal, options="nowrap" subs="+attributes"] ---- -$ awx-manage revoke_oauth2_tokens --user example_user +$ aap-gateway-manage revoke_oauth2_tokens --user example_user ---- To revoke all OAuth2 tokens and refresh token for the user with `id=example_user`: [literal, options="nowrap" subs="+attributes"] ---- -$ awx-manage revoke_oauth2_tokens --user example_user --revoke_refresh +$ aap-gateway-manage revoke_oauth2_tokens --user example_user --revoke_refresh ---- diff --git a/downstream/modules/platform/ref-controller-run-a-playbook.adoc b/downstream/modules/platform/ref-controller-run-a-playbook.adoc index dccf4c2593..1c9c0e653e 100644 --- a/downstream/modules/platform/ref-controller-run-a-playbook.adoc +++ b/downstream/modules/platform/ref-controller-run-a-playbook.adoc @@ -1,12 +1,12 @@ [id="controller-run-a-playbook"] -= Unable to run a playbook +//= Unable to run a playbook -If you are unable to run the `helloworld.yml` example playbook from the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/getting_started_with_automation_controller/index#controller-projects[Managing projects] section of the _{ControllerGS}_ guide due to playbook errors, try the following: +//If you are unable to run the `helloworld.yml` example playbook from the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/getting_started_with_automation_controller/index#controller-projects[Managing projects] section of the _{ControllerGS}_ guide due to playbook errors, try the following: -* Ensure that you are authenticating with the user currently running the commands. -If not, check how the username has been set up or pass the `--user=username` or `-u username` commands to specify a user. -* Is your YAML file correctly indented? -You might need to line up your whitespace correctly. -Indentation level is significant in YAML. -You can use `yamlint` to check your playbook. +//* Ensure that you are authenticating with the user currently running the commands. +//If not, check how the username has been set up or pass the `--user=username` or `-u username` commands to specify a user. +//* Is your YAML file correctly indented? +//You might need to line up your whitespace correctly. +//Indentation level is significant in YAML. +//You can use `yamlint` to check your playbook. 
diff --git a/downstream/modules/platform/ref-controller-scm-inv-source-fields.adoc b/downstream/modules/platform/ref-controller-scm-inv-source-fields.adoc
index adb028d2c1..2679c29d94 100644
--- a/downstream/modules/platform/ref-controller-scm-inv-source-fields.adoc
+++ b/downstream/modules/platform/ref-controller-scm-inv-source-fields.adoc
@@ -17,7 +17,7 @@ Additionally:

* In cases where you have a large project (around 10 GB), disk space on `/tmp` can be an issue.

You can specify a location manually in the {ControllerName} UI from the *Add source* page of an inventory.
-Refer to link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#proc-controller-add-source[Adding a source] for instructions on creating an inventory source.
+Refer to link:{URLControllerUserGuide}/controller-inventories#proc-controller-add-source[Adding a source] for instructions on creating an inventory source.

When you update a project, refresh the listing to use the latest source control management (SCM) information.
If no inventory sources use a project as an SCM inventory source, then the inventory listing might not be refreshed on update.
@@ -35,3 +35,64 @@ You can perform an inventory update while a related job is running.

== Supported File Syntax

{ControllerNameStart} uses the `ansible-inventory` module from Ansible to process inventory files, and supports all valid inventory syntax that {ControllerName} requires.
+
+[IMPORTANT]
+====
+You do not need to write inventory scripts in Python.
+You can enter any executable file in the source field, but you must run `chmod +x` on that file and check it into Git. A minimal sketch of such a script appears below.
+====
+
+The following is a working example of JSON output that {ControllerName} can read for the import:
+
+----
+{
+  "_meta": {
+    "hostvars": {
+      "host1": {
+        "fly_rod": true
+      }
+    }
+  },
+  "all": {
+    "children": [
+      "groupA",
+      "ungrouped"
+    ]
+  },
+  "groupA": {
+    "hosts": [
+      "host1",
+      "host10",
+      "host11",
+      "host12",
+      "host13",
+      "host14",
+      "host15",
+      "host16",
+      "host17",
+      "host18",
+      "host19",
+      "host2",
+      "host20",
+      "host21",
+      "host22",
+      "host23",
+      "host24",
+      "host25",
+      "host3",
+      "host4",
+      "host5",
+      "host6",
+      "host7",
+      "host8",
+      "host9"
+    ]
+  }
+}
+----
+
+.Additional resources
+
+* For examples of inventory files, see link:https://github.com/ansible/test-playbooks/tree/main/inventories[test-playbooks/inventories].
+* For an example of an inventory script in that repository, see link:https://github.com/ansible/test-playbooks/blob/main/inventories/changes.py[inventories/changes.py].
+* For information about how to implement the inventory script, see the support article, link:https://access.redhat.com/solutions/6997130[How to migrate inventory scripts from Red Hat Ansible tower to Red Hat Ansible Automation Platform?].

diff --git a/downstream/modules/platform/ref-controller-scm-inventory-details.adoc b/downstream/modules/platform/ref-controller-scm-inventory-details.adoc
index 868a6909aa..b208a6a396 100644
--- a/downstream/modules/platform/ref-controller-scm-inventory-details.adoc
+++ b/downstream/modules/platform/ref-controller-scm-inventory-details.adoc
@@ -2,9 +2,9 @@

= SCM inventory details

-To view details about the job execution and its associated project, select the *Access* tab.
+To view details about the job execution and its associated project, select the *Details* tab.
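To make the executable inventory source described above concrete, here is a minimal sketch of a non-Python inventory script that emits JSON in the shape {ControllerName} imports. The file name and group contents are illustrative only; a real script would discover hosts dynamically:

[source,bash]
----
#!/bin/bash
# inventory.sh - hypothetical executable inventory source.
# Prints a static JSON inventory in the format shown above.
cat <<'EOF'
{
  "_meta": {
    "hostvars": {
      "host1": { "fly_rod": true }
    }
  },
  "all": { "children": ["groupA", "ungrouped"] },
  "groupA": { "hosts": ["host1", "host2"] }
}
EOF
----

Run `chmod +x inventory.sh`, commit the file to Git, and point the inventory source at it, as noted in the IMPORTANT admonition above.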
-image::ug-details-for-scm-job.png[Details for SCM job] +//image::ug-details-for-scm-job.png[Details for SCM job] You can view the following details for an executed job: @@ -25,4 +25,4 @@ Reasons for SCM jobs not being ready include dependencies that are currently run * *Instance Group*: Indicates the instance group on which the job ran, if specified. * *Job Tags*: Tags show the various job operations executed. -Selecting these items enables you to view the corresponding job templates, projects, and other objects. +Select these items to view the corresponding job templates, projects, and other objects. diff --git a/downstream/modules/platform/ref-controller-smart-host-filter.adoc b/downstream/modules/platform/ref-controller-smart-host-filter.adoc index 62dc160fec..274b5a6f1d 100644 --- a/downstream/modules/platform/ref-controller-smart-host-filter.adoc +++ b/downstream/modules/platform/ref-controller-smart-host-filter.adoc @@ -4,7 +4,7 @@ You can use a search filter to populate hosts for an inventory. This feature uses the fact searching feature. -{ControllerNameStart} stores facts generated by an Ansible playbook during a Job Template in the database whenever `use_fact_cache=True` is set per-Job Template. +{ControllerNameStart} stores facts generated by an Ansible Playbook during a Job Template in the database whenever `use_fact_cache=True` is set per-Job Template. New facts are merged with existing facts and are per-host. These stored facts can be used to filter hosts with the `/api/v2/hosts` endpoint, using the `GET` query parameter `host_filter`. diff --git a/downstream/modules/platform/ref-controller-subscription-types.adoc b/downstream/modules/platform/ref-controller-subscription-types.adoc index cf3017b14a..5222faf18a 100644 --- a/downstream/modules/platform/ref-controller-subscription-types.adoc +++ b/downstream/modules/platform/ref-controller-subscription-types.adoc @@ -18,7 +18,7 @@ ** Review the SLA at link:https://access.redhat.com/support/offerings/production/sla[Product Support Terms of Service] ** Review the link:https://access.redhat.com/support/policy/severity[Red Hat Support Severity Level Definitions] -All subscription levels include regular updates and releases of {ControllerName}, Ansible, and any other components of the Platform. +All subscription levels include regular updates and releases of {ControllerName}, Ansible, and any other components of the {PlatformNameShort}. For more information, contact Ansible through the link:https://access.redhat.com/[Red Hat Customer Portal] -or at http://www.ansible.com/contact-us/. +or at the link:http://www.ansible.com/contact-us/[Ansible site]. diff --git a/downstream/modules/platform/ref-controller-system-requirements.adoc b/downstream/modules/platform/ref-controller-system-requirements.adoc index e0917a5400..5a6f6268e0 100644 --- a/downstream/modules/platform/ref-controller-system-requirements.adoc +++ b/downstream/modules/platform/ref-controller-system-requirements.adoc @@ -7,68 +7,40 @@ In the installer, four node types are provided as abstractions to help you desig Use the following recommendations for node sizing: -[NOTE] -==== -On control and hybrid nodes, allocate a minimum of 20 GB to `/var/lib/awx` for {ExecEnvShort} storage. -==== - *Execution nodes* Execution nodes run automation. Increase memory and CPU to increase capacity for running more forks. 
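Returning briefly to the `host_filter` query parameter described in the smart host filter module above, a request might look like the following sketch. The host name, credentials, and fact value are placeholders:

[source,bash]
----
# Filter hosts by a cached Ansible fact (all values are placeholders).
curl -u "admin:password" -G \
  --data-urlencode "host_filter=ansible_facts__ansible_distribution=RedHat" \
  https://<controller-host>/api/v2/hosts/
----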
[NOTE]
====
-* The RAM and CPU resources stated might not be required for packages installed on an execution node but are the minimum recommended to handle the job load for a node to run an average number of jobs simultaneously.
+* The RAM and CPU resources stated are minimum recommendations to handle the job load for a node to run an average number of jobs simultaneously.
* Recommended RAM and CPU node sizes are not supplied.
The required RAM or CPU depends directly on the number of jobs you are running in that environment.
-For further information about required RAM and CPU levels, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/assembly-controller-improving-performance[Performance tuning for automation controller].
-====
+* For capacity based on forks in your configuration, see link:{URLControllerUserGuide}/controller-jobs#controller-capacity-determination[{ControllerNameStart} capacity determination and job impact].

-.Execution nodes
-
-[cols="a,a",options="header"]
-|===
-h| Requirement | Minimum required
-| *RAM* | 16 GB
-| *CPUs* | 4
-| *Local disk* | 40GB minimum
-|===
+For further information about required RAM and CPU levels, see link:{URLControllerAdminGuide}/assembly-controller-improving-performance[Performance tuning for automation controller].
+====

*Control nodes*

Control nodes process events and run cluster jobs including project updates and cleanup jobs.
Increasing CPU and memory can help with job event processing.

-.Control nodes
+//Control nodes have the following storage requirements:

-[cols="a,a",options="header"]
-|===
-h| Requirement | Minimum required
-| *RAM* | 16 GB
-| *CPUs* | 4
-| *Local disk* a| * 40GB minimum with at least 20GB available under /var/lib/awx * Storage volume must be rated for a minimum baseline of 1500 IOPS * Projects are stored on control and hybrid nodes, and for the duration of jobs, are also stored on execution nodes. If the cluster has many large projects, consider doubling the GB in /var/lib/awx/projects, to avoid disk space errors.
-|===

*Hop nodes*

Hop nodes serve to route traffic from one part of the {AutomationMesh} to another (for example, a hop node could be a bastion host into another network).
RAM can affect throughput; CPU activity is low.
Network bandwidth and latency are generally a more important factor than either RAM or CPU.

-.Hop nodes
-
-[cols="a,a",options="header"]
-|===
-h| Requirement | Minimum required
-| *RAM* | 16 GB
-| *CPUs* | 4
-| *Local disk* | 40 GB
-|===
-
-* Actual RAM requirements vary based on how many hosts {ControllerName} will manage simultaneously (which is controlled by the `forks` parameter in the job template or the system `ansible.cfg` file).
-To avoid possible resource conflicts, Ansible recommends 1 GB of memory per 10 forks and 2 GB reservation for {ControllerName}. For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-jobs#controller-capacity-determination[{ControllerNameStart} capacity determination and job impact]. If `forks` is set to 400, 42 GB of memory is recommended.
+* Actual RAM requirements vary based on how many hosts {ControllerName} manages simultaneously (which is controlled by the `forks` parameter in the job template or the system `ansible.cfg` file).
+To avoid possible resource conflicts, Ansible recommends 1 GB of memory per 10 forks and 2 GB reservation for {ControllerName}.
+See link:{URLControllerUserGuide}/controller-jobs#controller-capacity-determination[{ControllerNameStart} capacity determination and job impact]. +If `forks` is set to 400, 42 GB of memory is recommended. * {ControllerNameStart} hosts check if `umask` is set to 0022. If not, the setup fails. Set `umask=0022` to avoid this error. * A larger number of hosts can be addressed, but if the fork number is less than the total host count, more passes across the hosts are required. You can avoid these RAM limitations by using any of the following approaches: ** Use rolling updates. @@ -78,5 +50,5 @@ To avoid possible resource conflicts, Ansible recommends 1 GB of memory per 10 f [role="_additional-resources"] .Additional resources -* For more information about obtaining an {ControllerName} subscription, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_user_guide/controller-managing-subscriptions#controller-importing-subscriptions[Importing a subscription]. +* For more information about obtaining an {ControllerName} subscription, see link:{URLCentralAuth}/assembly-gateway-licensing#proc-attaching-subscriptions[Attaching your {PlatformName} subscription]. * For questions, contact Ansible support through the link:https://access.redhat.com/[Red Hat Customer Portal]. diff --git a/downstream/modules/platform/ref-controller-team-mapping.adoc b/downstream/modules/platform/ref-controller-team-mapping.adoc index 1f32a5dc4b..8f20744808 100644 --- a/downstream/modules/platform/ref-controller-team-mapping.adoc +++ b/downstream/modules/platform/ref-controller-team-mapping.adoc @@ -1,60 +1,31 @@ +:_mod-docs-content-type: PROCEDURE + [id="ref-controller-team-mapping"] = Team mapping -Team mapping is the mapping of team members (users) from social authentication accounts. -Keys are team names (which are created if not present). -Values are dictionaries of options for each team's membership, where each can contain the following parameters: - -* *organization*: String. The name of the organization to which the team belongs. -The team is created if the combination of organization and team name does not exist. -The organization is created first if it does not exist. -If the license does not permit multiple organizations, the team is always assigned to the single default organization. - -* *users*: None, True/False, string or list/tuple of strings. - -*** If *None*, team members are not updated. -*** If *True*, all social authentication users are added as team members. -*** If *False*, all social authentication users are removed as team members. -* If a string or list of strings, specifies expressions used to match users, the user is added as a team member if the username or email matches. -Strings beginning and ending with `/` are compiled into regular expressions. -The modifiers `i` (case-insensitive) and `m` (multi-line) can be specified after the ending `/`. - -*remove*: True/False. Defaults to *True*. When *True*, a user who does not match the preceding rules is removed from the team. - -[literal, options="nowrap" subs="+attributes"] ----- -{ - "My Team": { - "organization": "Test Org", - "users": ["/^[^@]+?@test\\.example\\.com$/"], - "remove": true - }, - "Other Team": { - "organization": "Test Org 2", - "users": ["/^[^@]+?@test\\.example\\.com$/"], - "remove": false - } -} ----- - -Team mappings can be specified separately for each account authentication backend, based on which of these you set up. 
-When defined, these configurations take precedence over the preceding global configuration.
-
-[literal, options="nowrap" subs="+attributes"]
-----
-SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP = {}
-SOCIAL_AUTH_GITHUB_TEAM_MAP = {}
-SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP = {}
-SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP = {}
-SOCIAL_AUTH_SAML_TEAM_MAP = {}
-----
-
-Uncomment the following line, that is, set `SOCIAL_AUTH_USER_FIELDS` to an empty list, to prevent new user accounts from being created.
-
-[literal, options="nowrap" subs="+attributes"]
-----
-SOCIAL_AUTH_USER_FIELDS = []
-----
-
-Only users who have previously logged in to {ControllerName} using social or enterprise-level authentication, or have a user account with a matching email address can then login.
+Team mapping is the mapping of team members (users) from authenticators.
+
+You can define the options for each team’s membership. For each team, you can specify which users are automatically added as members of the team and also which users can administer the team.
+
+Team mappings can be specified separately for each authenticator.
+
+When team mapping is positively evaluated, the specified team and its organization are created, if they do not exist, provided that the related authenticator is allowed to create objects.
+
+
+.Procedure
+
+. After configuring the authentication details for your authentication type, select *Team* from the *Add authentication mapping* list.
+. Enter a unique rule *Name* to identify the rule.
+. Select a *Trigger* from the list. See xref:gw-authenticator-map-triggers[Authenticator map triggers] for more information about map triggers.
+. Select *Revoke* to deny user access to the system when none of the trigger conditions are matched.
+. Select the *Team* and *Organization* to which matching users are added or blocked.
+. Select a *Role* to be applied or removed for matching users (for example, *Team Admin* or *Team Member*).
+. Click btn:[Next].
+
+[role="_additional-resources"]
+.Next steps
+include::snippets/snip-gw-mapping-next-steps.adoc[]
+
+
+

diff --git a/downstream/modules/platform/ref-controller-token-session-management.adoc b/downstream/modules/platform/ref-controller-token-session-management.adoc
index 913d68b3f5..5275e0252d 100644
--- a/downstream/modules/platform/ref-controller-token-session-management.adoc
+++ b/downstream/modules/platform/ref-controller-token-session-management.adoc
@@ -2,11 +2,12 @@

= Token and session management

-{ControllerNameStart} supports the following commands for OAuth2 token management:
+{PlatformNameShort} supports the following commands for OAuth2 token management:

* xref:ref-controller-create-oauth2-token[`create_oauth2_token`]
* xref:ref-controller-revoke-oauth2-token[`revoke_oauth2_tokens`]
* xref:ref-controller-clear-sessions[`cleartokens`]
-* xref:ref-controller-expire-sessions[`expire_sessions`]
+//[emcwhinn - Temporarily hiding expire sessions module as it does not yet exist for gateway as per AAP-35735]
+//* xref:ref-controller-expire-sessions[`expire_sessions`]
* xref:ref-controller-clear-sessions[`clearsessions`]

diff --git a/downstream/modules/platform/ref-controller-trial-evaluation.adoc b/downstream/modules/platform/ref-controller-trial-evaluation.adoc
index 649b47b321..1f668223a9 100644
--- a/downstream/modules/platform/ref-controller-trial-evaluation.adoc
+++ b/downstream/modules/platform/ref-controller-trial-evaluation.adoc
@@ -1,9 +1,8 @@
[id="ref-controller-trial-evaluation"]

= Trial and evaluation
-You require a license to run {ControllerName}.
-You can start by using a free trial license.
+A license is required to run {PlatformNameShort}. You can start by using a free trial license.

-* Trial licenses for {PlatformNameShort} are available at: http://ansible.com/license
+* Trial licenses for {PlatformNameShort} are available at the link:https://www.redhat.com/en/products/trials?products=ansible[Red Hat product trial center].

-* Support is not included in a trial license or during an evaluation of the {ControllerName} software.
\ No newline at end of file
+* Support is not included in a trial license or during an evaluation of {PlatformNameShort}.
\ No newline at end of file

diff --git a/downstream/modules/platform/ref-controller-user-roles.adoc b/downstream/modules/platform/ref-controller-user-roles.adoc
index a7c6fef5c9..f1d572669f 100644
--- a/downstream/modules/platform/ref-controller-user-roles.adoc
+++ b/downstream/modules/platform/ref-controller-user-roles.adoc
@@ -1,20 +1,27 @@
+:_mod-docs-content-type: REFERENCE
+
[id="ref-controller-user-roles"]

-= Displaying a user's roles
+= Adding roles for a user

-From the *Users > Details* page, select the *Roles* tab to display the set of roles assigned to this user.
-These offer the ability to read, change, and administer projects, inventories, job templates, and other elements.
+You can grant users access to use, read, or write credentials by assigning roles to them.

-image:users-permissions-list-for-example-user.png[Users- permissions list]
+[NOTE]
+====
+Users cannot be assigned to an organization by adding roles. Refer to the steps provided in link:{URLCentralAuth}/gw-managing-access#proc-controller-add-organization-user[Adding a user to an organization] for detailed instructions.
+====

-//This doesn't seem to fit here.
-//[NOTE]
-//====
-//The job template administrator may not have access to other resources (inventory, project, credentials, or instance groups) associated with the template.
-//
-//Without access to these, certain fields in the job template are not editable.
-//
-//System Administrators can grant individual users permissions to certain resources as necessary.
-//
-//For more information, see xref:proc-controller-user-permissions[Adding permissions to a user].
-//====
+.Procedure
+. From the navigation panel, select {MenuAMUsers}.
+. From the *Users* list view, click the user to whom you want to add roles.
+. Select the *Roles* tab to display the set of roles assigned to this user. These provide the ability to read, modify, and administer resources.
+. To add new roles, click btn:[Add roles].
++
+include::snippets/snip-gw-roles-note-multiple-components.adoc[]
++
+. Select a Resource type and click btn:[Next].
+. Select the resources that are to receive the new roles and click btn:[Next].
+. Select the roles to apply to the resources and click btn:[Next].
+. Review the settings and click btn:[Finish].
++
+The *Add roles* dialog is displayed, indicating whether the role assignments were applied successfully. Click btn:[Close] to close the dialog.
diff --git a/downstream/modules/platform/ref-controller-user-teams.adoc b/downstream/modules/platform/ref-controller-user-teams.adoc
index 5ca19e8bf2..7b430c1665 100644
--- a/downstream/modules/platform/ref-controller-user-teams.adoc
+++ b/downstream/modules/platform/ref-controller-user-teams.adoc
@@ -1,15 +1,18 @@
-[id="ref-controller-user-teams"]
+:_mod-docs-content-type: REFERENCE

-= Displaying a user's teams
+[id="ref-controller-user-teams"]

-From the *Users > Details* page, select the *Teams* tab to display the list of teams of which that user is a member.
+= Adding a team for a user

-[NOTE]
-====
-You cannot modify team membership from this display panel.
-For more information, see xref:assembly-controller-teams[Teams].
-====
+You can add a team for a user from the Users list view.

-Until you create a team and assign a user to that team, the assigned teams details for that user is displayed as empty.
+.Procedure
+. From the navigation panel, select {MenuAMUsers}.
+. Select the user to whom you want to add team membership.
+. Select the *Teams* tab to display the list of teams of which that user is a member.
+. Click btn:[Add Team(s)].
+. Select the check box for the team to which you want to add the user.
+. You can search this list by the team *Name* or *Organization*.
++
+Until a team has been created and a user has been assigned to that team, the assigned team details for that user remain empty.

-//image:users-teams-list-for-example-user.png[Users - teams list]

diff --git a/downstream/modules/platform/ref-controller-variables.adoc b/downstream/modules/platform/ref-controller-variables.adoc
index ac12f67a1b..83a81358e2 100644
--- a/downstream/modules/platform/ref-controller-variables.adoc
+++ b/downstream/modules/platform/ref-controller-variables.adoc
@@ -2,140 +2,189 @@

= {ControllerNameStart} variables

-[cols="50%,50%",options="header"]
+[cols="50%,50%,50%",options="header"]
|====
-| *Variable* | *Description*
-| *`admin_password`* | The password for an administration user to access the UI when the installation is complete.
+| *RPM variable name* | *Container variable name* | *Description*
+| `admin_email` | | The email address used for the admin user for {ControllerName}.

-Passwords must be enclosed in quotes when they are provided in plain text in the inventory file.
-| *`automation_controller_main_url`* | For an alternative front end URL needed for SSO configuration, provide the URL.
+| `admin_password` | `controller_admin_password`| _Required_

-| *`automationcontroller_password`* | Password for your {ControllerName} instance.
+{ControllerNameStart} admin password. Passwords must be enclosed in quotes when they are provided in plain text in the inventory file.

-| *`automationcontroller_username`* | Username for your {ControllerName} instance.
-| *`nginx_http_port`* | The nginx HTTP server listens for inbound connections.
-Default = 80
-| *`nginx_https_port`* | The nginx HTTPS server listens for secure connections.
+Use of special characters for this variable is limited. The password can include any printable ASCII character except `/`, `"`, or `@`.
+
+| `admin_username` | `controller_admin_user` | {ControllerNameStart} admin user.
+
+Default = `admin`
+
+| `automation_controller_main_url` | | {ControllerNameStart} main URL.
+
+| `awx_pg_cert_auth` | `controller_pg_cert_auth` | Set this variable to `true` to enable client certificate authentication.
+
+Default = `false`
+
+| `controller_tls_files_remote` | `controller_tls_remote` | {ControllerNameStart} TLS remote files.
+
+Default = `false`
+
+| `nginx_disable_hsts` | `controller_nginx_disable_hsts` | Disable NGINX HTTP Strict Transport Security (HSTS).
+
+Default = `false`
-Default = 443
-| *`nginx_hsts_max_age`* | This variable specifies how long, in seconds, the system must be considered as a _HTTP Strict Transport Security_ (HSTS) host. That is, how long HTTPS is used exclusively for communication.
+| `nginx_disable_https` | `controller_nginx_disable_https` | Disable NGINX HTTPS.
-Default = 63072000 seconds, or two years.
-| *`nginx_tls_protocols`* | Defines support for `ssl_protocols` in Nginx.
+Default = `false`
-Values available `TLSv1`, `TLSv1.1, `TLSv1.2`, `TLSv1.3`
+| `nginx_hsts_max_age` | `controller_nginx_hsts_max_age` | This variable specifies how long, in seconds, the system must be considered as an _HTTP Strict Transport Security_ (HSTS) host. That is, how long only HTTPS is used for communication.
-The TLSv1.1 and TLSv1.2 parameters only work when OpenSSL 1.0.1 or higher is used.
+Default = `63072000` seconds, or two years.
-The TLSv1.3 parameter only works when OpenSSL 1.1.1 or higher is used.
+| `nginx_http_port` | `controller_nginx_http_port` | The NGINX HTTP server listens for inbound connections.
-If `nginx_tls-protocols = ['TLSv1.3']` only TLSv1.3 is enabled.
-To set more than one protocol use `nginx_tls_protocols = ['TLSv1.2', 'TLSv.1.3']`
+RPM default = `80`
-Default = `TLSv1.2`.
-| *`nginx_user_headers`* | List of nginx headers for the {ControllerName} web server.
+Container default = `8080`
-Each element in the list is provided to the web server's nginx configuration as a separate line.
+| `nginx_https_port` | `controller_nginx_https_port` | The NGINX HTTPS server listens for secure connections.
+
+RPM default = `443`
+
+Container default = `8443`
+
+| `nginx_user_headers` | `controller_nginx_user_headers` | List of NGINX headers for the {ControllerName} web server.
+
+Each element in the list is provided to the web server's NGINX configuration as a separate line.

Default = empty list
+
-| *`node_state`* | _Optional_
+| `node_state` | | _Optional_

The status of a node or group of nodes.

Valid options are `active`, `deprovision` to remove a node from a cluster, or `iso_migrate` to migrate a legacy isolated node to an execution node.

-Default = `active`.
-| *`node_type`* | For `[automationcontroller]` group.
+Default = `active`
+
+| `node_type` | See `receptor_type` for the container equivalent variable. a|
+
+For the `[automationcontroller]` group the two options are:
+
+* `node_type=control` - The node only runs project and inventory updates, but not regular jobs.
+
+* `node_type=hybrid` - The node runs everything.
+
+Default for this group = `hybrid`.
+
+For the `[execution_nodes]` group the two options are:
+
+* `node_type=hop` - The node forwards jobs to an execution node.
+* `node_type=execution` - The node can run jobs.
+
+Default for this group = `execution`
+
+| `peers` | See `receptor_peers` for the container equivalent variable. |
+
+Used to indicate which nodes a specific host or group connects to. Wherever this variable is defined, an outbound connection to the specific host or group is established.
+
+The `peers` variable can be a comma-separated list of hosts and groups from the inventory. This is resolved into a set of hosts that is used to construct the `receptor.conf` file.
+ + +| `pg_database` | `controller_pg_database` | The name of the PostgreSQL database used by {ControllerName}. + +Default = `awx` + +| `pg_host` | `controller_pg_host` | _Required_ + +The hostname of the PostgreSQL database used by {ControllerName}. + +Default = `127.0.0.1` + +| `pg_password` | `controller_pg_password` | Required if not using client certificate authentication. + +The password for the {ControllerName} PostgreSQL database. + +Use of special characters for this variable is limited. The `!`, `#`, `0` and `@` characters are supported. Use of other special characters can cause the setup to fail. + +| `pg_port` | `controller_pg_port` | Required if not using an internal database. + +The port number of the PostgreSQL database used by {ControllerName}. + +Default = `5432` + +| `pg_sslmode` | `controller_pg_sslmode` | Determines the level of encryption and authentication for client server connections. -Two valid `node_types` can be assigned for this group. +Valid options include `verify-full`, `verify-ca`, `require`, `prefer`, `allow`, `disable`. -A `node_type=control` means that the node only runs project and inventory updates, but not regular jobs. +Default = `prefer` -A `node_type=hybrid` can run everything. +| `pg_username` | `controller_pg_username` | The username for the {ControllerName} PostgreSQL database. -Default for this group = `hybrid` +Default = `awx` -For `[execution_nodes]` group: +| `pgclient_sslcert` | `controller_pg_tls_cert` | Required if using client certificate authentication. -Two valid `node_types` can be assigned for this group. +Path to the PostgreSQL SSL/TLS certificate file for {ControllerName}. -A `node_type=hop` implies that the node forwards jobs to an execution node. +| `pgclient_sslkey` | `controller_pg_tls_key` | Required if using client certificate authentication. -A `node_type=execution` implies that the node can run jobs. +Path to the PostgreSQL SSL/TLS key file for {ControllerName}. -Default for this group = `execution`. -| *`peers`* | _Optional_ +| `web_server_ssl_cert` | `controller_tls_cert` | _Optional_ -The `peers` variable is used to indicate which nodes a specific host or group connects to. Wherever this variable is defined, an outbound connection to the specific host or group is established. +Path to the SSL/TLS certificate file for {ControllerName}. -This variable is used to add `tcp-peer` entries in the `receptor.conf` file used for establishing network connections with other nodes. +| `web_server_ssl_key` | `controller_tls_key` | _Optional_ -The peers variable can be a comma-separated list of hosts and groups from the inventory. -This is resolved into a set of hosts that is used to construct the `receptor.conf` file. +Path to the SSL/TLS key file for {ControllerName}. -| *`pg_database`* | The name of the postgreSQL database. +| | `controller_event_workers` | {ControllerNameStart} event workers. -Default = `awx`. -| *`pg_host`* | The postgreSQL host, which can be an externally managed database. -| *`pg_password`* | The password for the postgreSQL database. +Default = `4` -Use of special characters for `pg_password` is limited. -The `!`, `#`, `0` and `@` characters are supported. -Use of other special characters can cause the setup to fail. +| | `controller_license_file` | The location of your {ControllerName} license file. -NOTE +For example: -You no longer have to provide a `pg_hashed_password` in your inventory file at the time of installation because PostgreSQL 13 can now store user passwords more securely. 
+`controller_license_file=/path/to/license.zip` -When you supply `pg_password` in the inventory file for the installer, PostgreSQL uses the SCRAM-SHA-256 hash to secure that password as part of the installation process. -| *`pg_port`* | The postgreSQL port to use. +If you are defining this variable as part of the postinstall process (`controller_postinstall = true`), then you need to also set the `controller_postinstall_dir` variable. -Default = 5432 -| *`pg_ssl_mode`* | Choose one of the two available modes: `prefer` and `verify-full`. +| | `controller_nginx_client_max_body_size` | NGINX maximum body size. -Set to `verify-full` for client-side enforced SSL. +Default = `5m` -Default = `prefer`. -| *`pg_username`* | Your postgreSQL database username. +| | `controller_nginx_https_protocols` | NGINX HTTPS protocols. -Default = `awx`. -| *`postgres_ssl_cert`* | Location of the postgreSQL SSL certificate. +Default = `[TLSv1.2, TLSv1.3]` -`/path/to/pgsql_ssl.cert` -| *`postgres_ssl_key`* | Location of the postgreSQL SSL key. +| | `controller_pg_socket` | PostgreSQL Controller UNIX socket. +| | `controller_secret_key` | The secret key value used by {ControllerName} to sign and encrypt data, ensuring secure communication and data integrity between services. -`/path/to/pgsql_ssl.key` -| *`postgres_use_cert`* | Location of the postgreSQL user certificate. +| | `controller_uwsgi_listen_queue_size` | {ControllerNameStart} uWSGI listen queue size. -`/path/to/pgsql.crt` -| *`postgres_use_key`* | Location of the postgreSQL user key. +Default = `2048` -`/path/to/pgsql.key` -| *`postgres_use_ssl`* | Use this variable if postgreSQL uses SSL. -| *`postgres_max_connections`* | Maximum database connections setting to apply if you are using installer-managed postgreSQL. +| | `controller_postinstall` | Enable or disable the postinstall feature of the containerized installer. -See link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#ref-controller-database-settings[PostgreSQL database configuration] in the {ControllerName} administration guide for help selecting a value. +If set to `true`, then you also need to set `controller_license_file` and `controller_postinstall_dir`. -Default for VM-based installations = 200 for a single node -and 1024 for a cluster. -| *`receptor_listener_port`* | Port to use for receptor connection. +Default = `false` -Default = 27199 -| *`supervisor_start_retry_count`* | When specified, it adds `startretries = ` to the supervisor config file (/etc/supervisord.d/tower.ini). +| | `controller_postinstall_dir` | The location of your {ControllerName} postinstall directory. +| | `controller_postinstall_async_delay` | Postinstall delay between retries. -See link:http://supervisord.org/configuration.html#program-x-section-values[program:x Section Values] for more information about `startretries`. +Default = `1` -No default value exists. +| | `controller_postinstall_async_retries` | Postinstall number of tries to attempt. -| *`web_server_ssl_cert`* | _Optional_ +Default = `30` -`/path/to/webserver.cert` +| | `controller_postinstall_ignore_files` | {ControllerNameStart} ignore files. +| | `controller_postinstall_repo_ref` | {ControllerNameStart} repository branch or tag. -Same as `automationhub_ssl_cert` but for web server UI and API. -| *`web_server_ssl_key`* | _Optional_ +Default = `main` -`/path/to/webserver.key` +| | `controller_postinstall_repo_url` | {ControllerNameStart} repository URL. 
-Same as `automationhub_server_ssl_key` but for web server UI and API. |==== diff --git a/downstream/modules/platform/ref-database-inventory-variables.adoc b/downstream/modules/platform/ref-database-inventory-variables.adoc new file mode 100644 index 0000000000..6acb732fc5 --- /dev/null +++ b/downstream/modules/platform/ref-database-inventory-variables.adoc @@ -0,0 +1,86 @@ +[id="ref-database-inventory-variables"] + += Database variables + +[cols="50%,50%,50%",options="header"] +|==== +| *RPM variable name* | *Container variable name* | *Description* +| `pg_ssl_mode` | | Choose one of the two available modes: `prefer` and `verify-full`. + +Set to `verify-full` for client-side enforced SSL/TLS. + +Default = `prefer` + +| `postgres_max_connections` | `postgresql_max_connections` | Maximum database connections setting to apply if you are using an installer-managed database. + +See link:{URLControllerAdminGuide}/assembly-controller-improving-performance#ref-controller-database-settings[PostgreSQL database configuration and maintenance for {ControllerName}] for help selecting a value. + +Default = `1024` + +| `postgres_ssl_cert` | `postgresql_tls_cert` | Path to the PostgreSQL SSL/TLS certificate file. + +| `postgres_ssl_key` | `postgresql_tls_key` | Path to the PostgreSQL SSL/TLS key file. + +| `postgres_use_cert` | | Location of the PostgreSQL user certificate file. + +`/path/to/pgsql.crt` + +| `postgres_use_ssl` | `postgresql_disable_tls` | Determines if the connection between {PlatformNameShort} and the PostgreSQL database should use SSL/TLS. + +The default for this variable is `false` which means SSL/TLS is not used for PostgreSQL connections. + +When set to `true`, the platform connects to PostgreSQL by using SSL/TLS. + +| | `postgresql_admin_username` | The username for the PostgreSQL admin user. + +When used, the installation program creates each component's database and credentials. + +The PostgreSQL admin user must have `SUPERUSER` privileges. + +Default = `postgres` + +| | `postgresql_admin_password` | Required when using `postgresql_admin_username`. + +The password for the PostgreSQL admin user. + +When used, the installation program creates each component's database and credentials. + +The PostgreSQL admin user must have `SUPERUSER` privileges. + +| | `postgresql_admin_database` | The name of the PostgreSQL admin database. + +Default = `postgres` + +| | `postgresql_effective_cache_size` | This defines the total memory available for caching data. + +The format should be MB. + +| | `postgresql_keep_databases` | Determines whether or not to keep databases during uninstall. + +When set to `true` databases will be kept during uninstall. + +This variable applies to databases managed by the installation program only, and not external customer-created databases. + +Default = `false` + +| | `postgresql_log_destination` | The location of the PostgreSQL log file. + +Default = `/dev/stderr` + +| | `postgresql_password_encryption` | The type of PostgreSQL password encryption to use. + +Default = `scram-sha-256` + +| | `postgresql_shared_buffers` | The amount of memory allocated for caching data within the database. + +The format should be MB. + +| | `postgresql_tls_remote` | PostgreSQL TLS remote files. + +Default = `false` + +| | `postgresql_port` | PostgreSQL port number. 
+ +Default = `5432` + +|==== \ No newline at end of file diff --git a/downstream/modules/platform/ref-eda-controller-variables.adoc b/downstream/modules/platform/ref-eda-controller-variables.adoc index b293a89d33..e7e987aa91 100644 --- a/downstream/modules/platform/ref-eda-controller-variables.adoc +++ b/downstream/modules/platform/ref-eda-controller-variables.adoc @@ -1,76 +1,200 @@ - [id="event-driven-ansible-controller"] = {EDAcontroller} variables -[cols="50%,50%",options="header"] +[cols="50%,50%,50%",options="header"] |==== -| *Variable* | *Description* -| *`automationedacontroller_admin_password`* | The admin password used by the {EdaController} instance. +| *RPM variable name* | *Container variable name* | *Description* -Passwords must be enclosed in quotes when they are provided in plain text in the inventory file. -| *`automationedacontroller_admin_username`* | Username used by django to identify and create the admin superuser in {EDAcontroller}. +| `automationedacontroller_activation_workers` | `eda_activation_workers` | _Optional_ -Default = `admin` -| *`automationedacontroller_admin_email`* | Email address used by django for the admin user for {EDAcontroller}. +Number of workers for ansible-rulebook activation pods in {EDAName}. + +Default = (# of cores or threads) * 2 + 1 + +| `automationedacontroller_admin_email` | `eda_admin_email` | _Optional_ + +Email address used by Django for the admin user for {EDAcontroller}. Default = `admin@example.com` -| *`automationedacontroller_allowed_hostnames`* | List of additional addresses to enable for user access to {EDAcontroller}. + +| `automationedacontroller_admin_password` | `eda_admin_password` | _Required_ + +The admin password used by the {EDAcontroller} instance. + +Passwords must be enclosed in quotes when they are provided in plain text in the `inventory` file. + +Use of special characters for this variable is limited. The password can include any printable ASCII character except `/`, `”`, or `@`. + +| `automationedacontroller_admin_username` | `eda_admin_user` | Username used by Django to identify and create the admin superuser in {EDAcontroller}. + +Default = `admin` + +| `automationedacontroller_allowed_hostnames` | | List of additional addresses to enable for user access to {EDAcontroller}. Default = empty list -| *`automationedacontroller_controller_verify_ssl`* | Boolean flag used to verify automation controller's web certificates when making calls from {EDAcontroller}. Verified is `true`; not verified is `false`. + +| `automationedacontroller_controller_verify_ssl` | | Boolean flag used to verify automation controller's web certificates when making calls from {EDAcontroller}. Verified is `true` and not verified is `false`. Default = `false` -| *`automationedacontroller_disable_https`* | Boolean flag to disable HTTPS {EDAcontroller}. + +| `automationedacontroller_disable_hsts` | `eda_nginx_disable_hsts` | _Optional_ + +Boolean flag to disable HSTS for {EDAcontroller}. Default = `false` -| *`automationedacontroller_disable_hsts`* | Boolean flag to disable HSTS {EDAcontroller}. + +| `automationedacontroller_disable_https` | `eda_nginx_disable_https` | _Optional_ + +Boolean flag to disable HTTPS for {EDAcontroller}. Default = `false` -| *`automationedacontroller_gunicorn_workers`* | Number of workers for the API served through gunicorn. + +| `automationedacontroller_event_stream_path` | `eda_event_stream_prefix_path` | API prefix path used for {EDAName} event-stream through {Gateway}. 
+ +Default = `/eda-event-streams` + +| `automationedacontroller_gunicorn_workers` | `eda_gunicorn_workers` | Number of workers for the API served through Gunicorn. Default = (# of cores or threads) * 2 + 1 -| *`automationedacontroller_max_running_activations`* | The number of maximum activations running concurrently per node. + +| `automationedacontroller_max_running_activations` | `eda_max_running_activations` | _Optional_ + +The number of maximum activations running concurrently per node. This is an integer that must be greater than 0. -Default = 12 -| *`automationedacontroller_nginx_tls_files_remote`* | Boolean flag to specify whether cert sources are on the remote host (true) or local (false). +Default = `12` + +| `automationedacontroller_nginx_tls_files_remote` | `eda_tls_remote` | Boolean flag to specify whether cert sources are on the remote host (true) or local (false). Default = `false` -| *`automationedacontroller_pg_database`* | The Postgres database used by {EDAController}. -Default = `automtionedacontroller`. -| *`automationnedacontroller_pg_host`* | The hostname of the Postgres database used by {EDAController}, which can be an externally managed database. -| *`automationedacontroller_pg_password`* | The password for the Postgres database used by {EDAController}. +| `automationedacontroller_pg_cert_auth` | `eda_pg_cert_auth` | Set this variable to `true` to enable client certificate authentication. + +Default = `false` + +| `automationedacontroller_pg_database` | `eda_pg_database` | The name of the PostgreSQL database used by {EDAName}. + +RPM default = `automationedacontroller` + +Container default = `eda` + +| `automationedacontroller_pg_host` | `eda_pg_host` | _Required_ + +The hostname of the PostgreSQL database used by {EDAName}. + +Default = `127.0.0.1` + +| `automationedacontroller_pg_password` | `eda_pg_password` | Required if not using client certificate authentication. + +The password for the {EDAName} PostgreSQL database. + +Use of special characters for this variable is limited. The `!`, `#`, `0` and `@` characters are supported. Use of other special characters can cause the setup to fail. + +| `automationedacontroller_pg_port` | `eda_pg_port` | Required if not using an internal database. + +The port number of the PostgreSQL database used by {EDAName}. + +Default = `5432` + +| `automationedacontroller_pg_sslmode` | `eda_pg_sslmode` | Determines the level of encryption and authentication for client server connections. + +Valid options include `verify-full`, `verify-ca`, `require`, `prefer`, `allow`, `disable`. + +Default = `prefer` -Use of special characters for `automationedacontroller_pg_password` is limited. -The `!`, `#`, `0` and `@` characters are supported. -Use of other special characters can cause the setup to fail. -| *`automationedacontroller_pg_port`* | The port number of the Postgres database used by {EDAController}. +| `automationedacontroller_pg_username` | `eda_pg_username` | The username for the {EDAName} PostgreSQL database. -Default = `5432`. -| *`automationedacontroller_pg_username`* | The username for your {EDAController} Postgres database. +RPM default = `automationedacontroller` -Default = `automationedacontroller`. -| *`automationedacontroller_rq_workers`* | Number of Redis Queue (RQ) workers used by {EDAcontroller}. RQ workers are Python processes that run in the background. 
+Container default = `eda`
-Default = (# of cores or threads) * 2 + 1
-| *`automationedacontroller_ssl_cert`* | _Optional_
+| `automationedacontroller_pgclient_sslcert` | `eda_pg_tls_cert` | Required if using client certificate authentication.
-`/root/ssl_certs/eda.__.com.crt`
+Path to the PostgreSQL SSL/TLS certificate file for {EDAName}.
-Same as `automationhub_ssl_cert` but for {EDAcontroller} UI and API.
-| *`automationedacontroller_ssl_key`* | _Optional_
+| `automationedacontroller_pgclient_sslkey` | `eda_pg_tls_key` | Required if using client certificate authentication.
-`/root/ssl_certs/eda.__.com.key`
+Path to the PostgreSQL SSL/TLS key file for {EDAName}.
-Same as `automationhub_server_ssl_key` but for {EDAcontroller} UI and API.
-| *`automationedacontroller_user_headers`* | List of additional nginx headers to add to {EDAcontroller}'s nginx configuration.
+| `automationedacontroller_redis_host` | `eda_redis_host` | The Redis hostname used by {EDAcontroller}.
+
+| `automationedacontroller_redis_port` | `eda_redis_port` | The port used for the Redis host defined by `automationedacontroller_redis_host` for {EDAcontroller}.
+
+| `automationedacontroller_rq_workers` | | Number of Redis Queue (RQ) workers used by {EDAcontroller}. RQ workers are Python processes that run in the background.
+
+Default = (# of cores or threads) * 2 + 1
+
+| `automationedacontroller_ssl_cert` | `eda_tls_cert` | _Optional_
+
+Path to the SSL/TLS certificate file for {EDAName}.
+
+| `automationedacontroller_ssl_key` | `eda_tls_key` | _Optional_
+
+Path to the SSL/TLS key file for {EDAName}.
+
+| `automationedacontroller_user_headers` | `eda_nginx_user_headers` | List of additional NGINX headers to add to {EDAcontroller}'s NGINX configuration.

Default = empty list

-//Add this variable back for the next release, as long as approved by development.
-//| *`automationedacontroller_websocket_ssl_verify`* |
-//SSL verification for the Daphne websocket used by podman to communicate from the pod to the host. Default is false to disable SSL connection as verified
-//Default = false
+| `eda_node_type` | `eda_type` | _Optional_
+
+{EDAcontroller} node type.
+
+Default = `hybrid`
+
+| | `eda_debug` | {EDAcontroller} debug.
+
+Default = `false`
+
+| | `eda_event_stream_url` | {EDAcontroller} event stream URL.
+
+| | `eda_main_url` | {EDAcontroller} main URL.
+
+| | `eda_nginx_client_max_body_size` | NGINX maximum body size.
+
+Default = `1m`
+
+| | `eda_nginx_hsts_max_age` | NGINX HSTS maximum age.
+
+Default = `63072000`
+
+| | `eda_nginx_http_port` | NGINX HTTP port.
+
+Default = `8082`
+
+| | `eda_nginx_https_port` | NGINX HTTPS port.
+
+Default = `8445`
+
+| | `eda_nginx_https_protocols` | NGINX HTTPS protocols.
+
+Default = `[TLSv1.2, TLSv1.3]`
+
+| | `eda_pg_socket` | PostgreSQL {EDAName} UNIX socket.
+
+| | `eda_redis_disable_tls` | Disable TLS for Redis (for many nodes).
+
+Default = `false`
+
+| | `eda_redis_password` | Redis {EDAcontroller} password (for many nodes).
+
+| | `eda_redis_tls_cert` | _Optional_
+
+Path to the {EDAName} Redis certificate file.
+
+| | `eda_redis_tls_key` | _Optional_
+
+Path to the {EDAName} Redis key file.
+
+| | `eda_redis_username` | Redis {EDAcontroller} username (for many nodes).
+
+| | `eda_safe_plugins` | {EDAcontroller} safe plugins.
+
+| | `eda_secret_key` | The secret key value used by {EDAcontroller} to sign and encrypt data, ensuring secure communication and data integrity between services.
+
+| | `eda_workers` | {EDAcontroller} worker count.
+ +Default = `2` + |==== diff --git a/downstream/modules/platform/ref-eda-system-requirements.adoc b/downstream/modules/platform/ref-eda-system-requirements.adoc index e8e0259c27..a2c507815c 100644 --- a/downstream/modules/platform/ref-eda-system-requirements.adoc +++ b/downstream/modules/platform/ref-eda-system-requirements.adoc @@ -2,21 +2,35 @@ = {EDAcontroller} system requirements -The {EDAcontroller} is a single-node system capable of handling a variable number of long-running processes (such as rulebook activations) on-demand, depending on the number of CPU cores. Use the following minimum requirements to run, by default, a maximum of 12 simultaneous activations: +The {EDAcontroller} is a single-node system capable of handling a variable number of long-running processes (such as rulebook activations) on-demand, depending on the number of CPU cores. +[NOTE] +==== +If you want to use {EDAName} 2.5 with a 2.4 {ControllerName} version, see link:{BaseURL}/red_hat_ansible_automation_platform/2.4/html-single/using_event-driven_ansible_2.5_with_ansible_automation_platform_2.4/index[Using {EDAName} 2.5 with {PlatformNameShort} 2.4]. +==== + +Use the following minimum requirements to run, by default, a maximum of 12 simultaneous activations: -[cols="a,a",options="header"] +[cols=2*,options="header"] |=== -h| Requirement | Required +| Requirement | Required | *RAM* | 16 GB | *CPUs* | 4 -| *Local disk* | 40 GB minimum +| *Local disk* a| +* Hard drive must be 40 GB minimum with at least 20 GB available under /var. +* Storage volume must be rated for a minimum baseline of 1500 IOPS. +* If the cluster has many large projects or decision environment images, consider doubling the GB in /var to avoid disk space errors. |=== [IMPORTANT] ==== -* If you are running {RHEL} 8 and want to set your memory limits, you must have cgroup v2 enabled before you install {EDAName}. For specific instructions, see the Knowledge-Centered Support (KCS) article, link:https://access.redhat.com/solutions/7054905[Ansible Automation Platform Event-Driven Ansible controller for {RHEL} 8 requires cgroupv2]. +* If you are running {RHEL} 8 and want to set your memory limits, you must have cgroup v2 enabled before you install {EDAName}. +For specific instructions, see the Knowledge-Centered Support (KCS) article, link:https://access.redhat.com/solutions/7054905[Ansible Automation Platform Event-Driven Ansible controller for {RHEL} 8 requires cgroupv2]. + +* When you activate an {EDAName} rulebook under standard conditions, it uses about 250 MB of memory. +However, the actual memory consumption can vary significantly based on the complexity of your rules and the volume and size of the events processed. +In scenarios where a large number of events are anticipated or the rulebook complexity is high, conduct a preliminary assessment of resource usage in a staging environment. +This ensures that your maximum number of activations is based on the capacity of your resources. -* When you activate an {EDAName} rulebook under standard conditions, it uses about 250 MB of memory. However, the actual memory consumption can vary significantly based on the complexity of your rules and the volume and size of the events processed. In scenarios where a large number of events are anticipated or the rulebook complexity is high, conduct a preliminary assessment of resource usage in a staging environment. This ensures that your maximum number of activations is based on the capacity of your resources. 
See link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/red_hat_ansible_automation_platform_installation_guide/index#ref-single-controller-hub-eda-with-managed-db[Single {ControllerName}, single {HubName}, and single {EDAcontroller} node with external (installer managed) database] for an example on setting {EDAController} maximum
-running activations.
+For an example of setting {EDAController} maximum running activations, see xref:ref-gateway-controller-hub-eda-ext-db[Single {ControllerName}, single {HubName}, and single {EDAcontroller} node with external (installer managed) database].
 ====
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-edge-manager-additional-fields.adoc b/downstream/modules/platform/ref-edge-manager-additional-fields.adoc
new file mode 100644
index 0000000000..3907a8224f
--- /dev/null
+++ b/downstream/modules/platform/ref-edge-manager-additional-fields.adoc
@@ -0,0 +1,66 @@
+[id="edge-manager-additional-fields"]
+
+= List of additional supported fields
+
+In addition to the metadata fields, each resource has its own unique set of fields that you can select, offering further flexibility in filtering and selection based on resource-specific attributes.
+
+The following table lists the fields supported for filtering for each resource kind:
+
+[width="100%",cols="39%,61%",options="header",]
+|===
+|Kind |Fields
+|*Certificate Signing Request* |`status.certificate`
+
+|*Device*
+|`status.summary.status`
+
+`status.applicationsSummary.status`
+
+`status.updated.status`
+
+`status.lastSeen`
+
+`status.lifecycle.status`
+
+|*Enrollment Request* |`status.approval.approved`
+
+`status.certificate`
+
+|*Fleet* |`spec.template.spec.os.image`
+
+|*Repository* |`spec.type`
+
+`spec.url`
+
+|*Resource Sync* |`spec.repository`
+|===
+
+.Examples
+
+.Example 1: Excluding a specific device by name
+
+The following command filters out a specific device by its name:
+
+[source,bash]
+----
+flightctl get devices --field-selector 'metadata.name!=c3tkb18x9fw32fzx5l556n0p0dracwbl4uiojxu19g2'
+----
+
+.Example 2: Filter by owner, labels, and creation timestamp
+
+This command retrieves devices owned by `Fleet/pos-fleet`, located in the `us` region, and created in 2024:
+
+[source,bash]
+----
+flightctl get devices --field-selector 'metadata.owner=Fleet/pos-fleet, metadata.creationTimestamp >= 2024-01-01T00:00:00Z, metadata.creationTimestamp < 2025-01-01T00:00:00Z' -l 'region=us'
+----
+
+.Example 3: Filter by owner, labels, and device status
+
+This command retrieves devices owned by `Fleet/pos-fleet`, located in the `us` region, and with a `status.updated.status` of either `Unknown` or `OutOfDate`:
+
+[source,bash]
+----
+flightctl get devices --field-selector 'metadata.owner=Fleet/pos-fleet, status.updated.status in (Unknown, OutOfDate)' -l 'region=us'
+----
diff --git a/downstream/modules/platform/ref-edge-manager-device-lifecycle.adoc b/downstream/modules/platform/ref-edge-manager-device-lifecycle.adoc
new file mode 100644
index 0000000000..9a880702b7
--- /dev/null
+++ b/downstream/modules/platform/ref-edge-manager-device-lifecycle.adoc
@@ -0,0 +1,111 @@
+[id="edge-manager-device-lifecycle"]
+
+= Use device lifecycle hooks
+
+You can use device lifecycle hooks to make the agent run user-defined commands at specific points in the device's lifecycle.
+For example, you can add a shell script to your operating system images that backs up your application data.
+You can then specify that this script must run and complete successfully before the agent can start updating the system.
+
+The following device lifecycle hooks are supported:
+
+[width="100%",cols="56%,44%",options="header",]
+|===
+|Lifecycle Hook |Description
+|`beforeUpdating` |This hook is called after the agent has completed preparing for the update and before actually making changes to the system.
+If an action in this hook returns with failure, the agent cancels the update.
+
+|`afterUpdating` |This hook is called after the agent has written the update to disk.
+If an action in this hook returns with failure, the agent cancels and rolls back the update.
+
+|`beforeRebooting` |This hook is called before the system reboots. The agent blocks the reboot until the action has completed or timed out.
+If any action in this hook returns with failure, the agent cancels and rolls back the update.
+
+|`afterRebooting` |This hook is called when the agent first starts after a reboot.
+If any action in this hook returns with failure, the agent reports this but continues starting up.
+|===
+
+For a state diagram defining when each device lifecycle hook is called by the agent, see the ADD LINK[Device API statuses] section.
+
+You can define device lifecycle hooks by adding rule files to one of two locations in the device's filesystem, where `${lifecyclehook}` is the all-lowercase name of the hook you want to define:
+
+* Rules in the `/usr/lib/flightctl/hooks.d/${lifecyclehook}/` drop-in directory are read-only.
+Therefore, you must add them to the operating system image during ADD LINK[image building].
+* Rules in the `/etc/flightctl/hooks.d/${lifecyclehook}/` drop-in directory are read-writable.
+You can update them at runtime by using the methods described in ADD LINK[Managing OS Configuration].
+
+If rules are defined in both locations, they are merged, with files under `/etc` taking precedence over files of the same name under `/usr`.
+If multiple rule files are added to a hook's directory, they are processed in lexical order of their file names.
+
+A rule file is written in YAML format and contains a list of one or more actions.
+An action can be to run an external command ("run action").
+When you specify multiple actions for a hook, these actions are performed in sequence, finishing one action before starting the next.
+If an action returns with failure, later actions are not executed.
+
+A run action takes the following parameters:
+
+[width="100%",cols="45%,55%",options="header",]
+|===
+|Parameter |Description
+|Run |The absolute path to the command to run, followed by any flags or arguments.
+
+Example: `/usr/bin/nmcli connection reload`.
+
+Note that the command is not executed in a shell, so you cannot use shell variables like `$PATH` or `$HOME` or chain commands (`\|` or `;`). However, it is possible to start a shell yourself if necessary by specifying the shell as the command to run.
+
+Example: `/usr/bin/bash -c 'echo $SHELL $HOME $USER'`
+
+|EnvVars |(Optional) A list of key/value pairs to set as environment variables for the command.
+
+|WorkDir |(Optional) The directory the command is run from.
+
+|Timeout |(Optional) The maximum duration allowed for the action to complete. Specify the duration as a single positive integer followed by a time unit.
+The following time units are supported: `s` for seconds, `m` for minutes, and `h` for hours.
+
+|If |(Optional) A list of conditions that must be true for the action to be run.
+If not provided, actions run unconditionally.
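+|===
+
+As an illustration, a rule file with a single unconditional run action might look like the following sketch. The YAML key names and casing are assumptions based on the parameters described above, and the backup script path is hypothetical; verify the exact format against the built-in rule file in `/usr/lib/flightctl/hooks.d/afterupdating/00-default.yaml` on your device.
+
+[source,yaml]
+----
+# Hypothetical rule file: /etc/flightctl/hooks.d/beforeupdating/10-backup.yaml
+# Runs a (hypothetical) backup script before the agent applies an update,
+# from a given working directory, and gives it at most five minutes to finish.
+- run: /usr/bin/backup-app-data.sh
+  workDir: /var/lib/app
+  timeout: 5m
+----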
+
+By default, actions are performed every time the hook is triggered.
+However, for the `afterUpdating` hook, you can use the `If` parameter to add conditions that must be true for an action to be performed, otherwise the action is skipped.
+
+In particular, to run an action only if a given file or directory has changed during the update, you can define a "path condition" that takes the following parameters:
+
+[width="100%",cols="45%,55%",options="header",]
+|===
+|Parameter |Description
+|Path |An absolute path to a file or directory that must have changed during the update as a condition for the action to be performed. Specify paths by using forward slashes (`/`). If the path is to a directory, it must end with a forward slash (`/`).
+
+If you specify a path to a file, the file must have changed to satisfy the condition.
+If you specify a path to a directory, a file in that directory or any of its subdirectories must have changed to satisfy the condition.
+
+|On |A list of file operations (`created`, `updated`, `removed`) to further limit the kind of changes to the specified path as a condition for the action to be performed.
+|===
+
+If you have specified a path condition for an action in the `afterUpdating` hook, you can include the following variables in arguments to your command; they are replaced with the absolute paths of the changed files:
+
+[width="100%",cols="43%,57%",options="header",]
+|===
+|Variable |Description
+|`{{ Path }}` |The absolute path to the file or directory specified in the path condition.
+
+|`{{ Files }}` |A space-separated list of absolute paths of the files that were changed (created, updated, or removed) during the update and are covered by the path condition.
+
+|`{{ CreatedFiles }}` |A space-separated list of absolute paths of the files that were created during the update and are covered by the path condition.
+
+|`{{ UpdatedFiles }}` |A space-separated list of absolute paths of the files that were updated during the update and are covered by the path condition.
+
+|`{{ RemovedFiles }}` |A space-separated list of absolute paths of the files that were removed during the update and are covered by the path condition.
+|===
+
+The {RedHatEdge} agent includes a built-in set of rules defined in `/usr/lib/flightctl/hooks.d/afterupdating/00-default.yaml`:
+
+[width="100%",cols="50%,28%,22%",options="header",]
+|===
+|If you change the following files |then the agent runs |Description
+|`/etc/systemd/system/` |`systemctl daemon-reload` |Changes to systemd units are activated by signaling the systemd daemon to reload the systemd manager configuration.
+This reruns all generators, reloads all unit files, and re-creates the entire dependency tree.
+
+|`/etc/NetworkManager/system-connections/` |`nmcli conn reload` |Changes to Network Manager system connections are activated by signaling Network Manager to reload all connections.
+
+|`/etc/firewalld/` |`firewall-cmd --reload` |Changes to firewalld's permanent configuration are activated by signaling firewalld to reload firewall rules as new runtime configuration.
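+|===
+
+For illustration, the following sketch shows a custom rule file that restarts a service only when files under a watched directory change during an update. The YAML key names and casing are assumptions based on the parameters described above; verify them against the built-in rule file before relying on them.
+
+[source,yaml]
+----
+# Hypothetical rule file: /etc/flightctl/hooks.d/afterupdating/90-reload-dnsmasq.yaml
+# The action runs only if files under /etc/dnsmasq.d/ were created, updated,
+# or removed during the update.
+- run: /usr/bin/systemctl restart dnsmasq.service
+  timeout: 1m
+  if:
+    - path: /etc/dnsmasq.d/
+      on: [created, updated, removed]
+----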
diff --git a/downstream/modules/platform/ref-edge-manager-field-selectors.adoc b/downstream/modules/platform/ref-edge-manager-field-selectors.adoc
new file mode 100644
index 0000000000..b4b398909b
--- /dev/null
+++ b/downstream/modules/platform/ref-edge-manager-field-selectors.adoc
@@ -0,0 +1,22 @@
+[id="edge-manager-field-selectors"]
+
+= Field selectors
+
+Field selectors filter a list of {RedHatEdge} resources based on specific resource field values.
+They follow the same syntax, principles, and operators as Kubernetes Field and Label selectors, with additional operators available for more advanced search use cases.
+
+== Supported fields
+
+{RedHatEdge} resources provide a set of metadata fields that you can select.
+
+Each resource supports the following metadata fields:
+
+* `metadata.name`
+* `metadata.owner`
+* `metadata.creationTimestamp`
+
+[NOTE]
+====
+To query labels, use Label Selectors for advanced and flexible label filtering.
+====
diff --git a/downstream/modules/platform/ref-edge-manager-fields-discovery.adoc b/downstream/modules/platform/ref-edge-manager-fields-discovery.adoc
new file mode 100644
index 0000000000..98f3129179
--- /dev/null
+++ b/downstream/modules/platform/ref-edge-manager-fields-discovery.adoc
@@ -0,0 +1,28 @@
+[id="edge-manager-fields-discovery"]
+
+= Fields discovery
+
+Some {RedHatEdge} resources might expose additional supported fields.
+You can discover the supported fields by using `flightctl` with the `--field-selector` option.
+If you try to use an unsupported field, the error message lists the available supported fields.
+
+.Example
+
+[source,bash]
+----
+flightctl get device --field-selector='text'
+
+Error: listing devices: 400, message: unknown or unsupported selector: unable to resolve selector name "text". Supported selectors are: [metadata.alias metadata.creationTimestamp metadata.name metadata.nameoralias metadata.owner status.applicationsSummary.status status.lastSeen status.summary.status status.updated.status]
+----
+
+In this example, the field `text` is not a valid field for filtering.
+The error message provides a list of supported fields that you can use with `--field-selector` for the `device` resource.
+
+You can then use one of the supported fields:
+
+[source,bash]
+----
+flightctl get devices --field-selector 'metadata.alias contains cluster'
+----
+
+In this command, the `metadata.alias` field is checked with the containment operator `contains` to see if it contains the value `cluster`.
diff --git a/downstream/modules/platform/ref-edge-manager-monitor-device.adoc b/downstream/modules/platform/ref-edge-manager-monitor-device.adoc
new file mode 100644
index 0000000000..a62c5a3cce
--- /dev/null
+++ b/downstream/modules/platform/ref-edge-manager-monitor-device.adoc
@@ -0,0 +1,47 @@
+[id="manager-monitor-device"]
+
+= Monitor device resources
+
+You can set up monitors for device resources and define alerts when the usage of these resources crosses a defined threshold.
+When the agent alerts the {RedHatEdge} service, the service sets the device status to "degraded" or "error", depending on the severity level.
+This might suspend the rollout of updates and alert the user as a result.
+
+[NOTE]
+====
+This is not meant to replace an observability solution.
+If your use case requires streaming logs and metrics from devices into an observability stack and the device's network bandwidth allows this, see ADD LINK[Adding Device Observability] for more information.
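+====
+
+For illustration, the following sketch shows how a disk monitor with two alert rules might be declared in a device's specification. The exact field names and their placement in the device spec are assumptions based on the parameter descriptions in the tables that follow; verify them against your product's API reference.
+
+[source,yaml]
+----
+# Hypothetical excerpt of a device spec declaring a disk resource monitor.
+resources:
+  - monitorType: Disk
+    samplingInterval: 30s
+    path: /var/lib/containers
+    alertRules:
+      # Warn when average disk usage stays above 75% for 10 minutes.
+      - severity: Warning
+        duration: 10m
+        percentage: 75
+        description: Disk usage is above 75% for more than 10m.
+      # Escalate when average disk usage stays above 90% for 5 minutes.
+      - severity: Critical
+        duration: 5m
+        percentage: 90
+        description: Disk usage is above 90% for more than 5m.
+----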
+
+Resource monitors take the following parameters:
+
+[width="100%",cols="45%,55%",options="header",]
+|===
+|Parameter |Description
+|MonitorType |The resource to monitor.
+Currently supported resources are "CPU", "Memory", and "Disk".
+
+|SamplingInterval |The interval at which the monitor samples resource usage, specified as a positive integer followed by a time unit ("s" for seconds, "m" for minutes, "h" for hours).
+
+|AlertRules |A list of alert rules.
+
+|Path |(Disk monitor only) The absolute path to the directory to monitor.
+Utilization reflects the filesystem containing the path, similar to `df`, even if it is not a mount point.
+|===
+
+Alert rules take the following parameters:
+
+[width="100%",cols="45%,55%",options="header",]
+|===
+|Parameter |Description
+|Severity |The alert rule's severity level, one of "Info", "Warning", or "Critical".
+Only one alert rule is allowed per severity level and monitor.
+
+|Duration |The duration over which resource usage is measured and averaged when sampling, specified as a positive integer followed by a time unit ("s" for seconds, "m" for minutes, "h" for hours).
+The sampling interval must be smaller than this duration so that multiple samples fall within the averaging window.
+
+|Percentage |The usage threshold that triggers the alert, as a percentage value (range 0 to 100 without the "%" sign).
+
+|Description |A human-readable description of the alert.
+This is useful for adding details about the alert that might help with debugging.
+//By default it populates the alert as : load is above >% for more than.
+|===
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-edge-manager-supported-operators.adoc b/downstream/modules/platform/ref-edge-manager-supported-operators.adoc
new file mode 100644
index 0000000000..245f4a4b79
--- /dev/null
+++ b/downstream/modules/platform/ref-edge-manager-supported-operators.adoc
@@ -0,0 +1,137 @@
+[id="edge-manager-supported-operators"]
+
+= Supported operators
+
+[width="100%",cols="24%,17%,59%",options="header",]
+|===
+|Operator |Symbol |Description
+|Exists |`exists` |Checks if a field exists
+
+|DoesNotExist |`!` |Checks if a field does not exist
+
+|Equals |`=` |Checks if a field is equal to a value
+
+|DoubleEquals |`==` |Another form of equality check
+
+|NotEquals |`!=` |Checks if a field is not equal to a value
+
+|GreaterThan |`>` |Checks if a field is greater than a value
+
+|GreaterThanOrEquals |`>=` |Checks if a field is greater than or equal to a value
+
+|LessThan |`<` |Checks if a field is less than a value
+
+|LessThanOrEquals |`<=` |Checks if a field is less than or equal to a value
+
+|In |`in` |Checks if a field is within a list of values
+
+|NotIn |`notin` |Checks if a field is not in a list of values
+
+|Contains |`contains` |Checks if a field contains a value
+
+|NotContains |`notcontains` |Checks if a field does not contain a value
+|===
+
+== Operator usage by field type
+
+Each field type supports a specific subset of operators:
+
+[width="100%",cols="20,60%,20%",options="header",]
+|===
+|Field Type |Supported Operators |Value
+|*String* |`Equals`: Matches if the field value is an exact match to the specified string.
+
+`DoubleEquals`: Matches if the field value is an exact match to the specified string (alternative to `Equals`).
+
+`NotEquals`: Matches if the field value is not an exact match to the specified string.
+
+`In`: Matches if the field value matches at least one string in the list.
+
+`NotIn`: Matches if the field value does not match any of the strings in the list.
+
+`Contains`: Matches if the field value contains the specified substring.
+
+`NotContains`: Matches if the field value does not contain the specified substring.
+
+`Exists`: Matches if the field is present.
+
+`DoesNotExist`: Matches if the field is not present. |Text string
+
+|*Timestamp* |`Equals`: Matches if the field value is an exact match to the specified timestamp.
+
+`DoubleEquals`: Matches if the field value is an exact match to the specified timestamp (alternative to `Equals`).
+
+`NotEquals`: Matches if the field value is not an exact match to the specified timestamp.
+
+`GreaterThan`: Matches if the field value is after the specified timestamp.
+
+`GreaterThanOrEquals`: Matches if the field value is after or equal to the specified timestamp.
+
+`LessThan`: Matches if the field value is before the specified timestamp.
+
+`LessThanOrEquals`: Matches if the field value is before or equal to the specified timestamp.
+
+`In`: Matches if the field value matches at least one timestamp in the list.
+
+`NotIn`: Matches if the field value does not match any of the timestamps in the list.
+
+`Exists`: Matches if the field is present.
+
+`DoesNotExist`: Matches if the field is not present. |RFC 3339 format
+
+|*Number* |`Equals`: Matches if the field value equals the specified number.
+
+`DoubleEquals`: Matches if the field value equals the specified number (alternative to `Equals`).
+
+`NotEquals`: Matches if the field value does not equal the specified number.
+
+`GreaterThan`: Matches if the field value is greater than the specified number.
+
+`GreaterThanOrEquals`: Matches if the field value is greater than or equal to the specified number.
+
+`LessThan`: Matches if the field value is less than the specified number.
+
+`LessThanOrEquals`: Matches if the field value is less than or equal to the specified number.
+
+`In`: Matches if the field value equals at least one number in the list.
+
+`NotIn`: Matches if the field value does not equal any numbers in the list.
+
+`Exists`: Matches if the field is present.
+
+`DoesNotExist`: Matches if the field is not present. |Number format
+
+|*Boolean* a|`Equals`: Matches if the value is `true` or `false`.
+
+`DoubleEquals`: Matches if the value is `true` or `false` (alternative to `Equals`).
+
+`NotEquals`: Matches if the value is the opposite of the specified value.
+
+`In`: Matches if the value (`true` or `false`) is in the list.
+
+[NOTE]
+====
+The list can only contain `true` or `false`, so this operator is limited in use.
+====
+
+`NotIn`: Matches if the value is not in the list.
+
+`Exists`: Matches if the field is present.
+
+`DoesNotExist`: Matches if the field is not present. |Boolean format (`true`, `false`)
+
+|*Array* a|`Contains`: Matches if the array contains the specified value.
+
+`NotContains`: Matches if the array does not contain the specified value.
+
+`In`: Matches if the array overlaps with the specified values.
+
+`NotIn`: Matches if the array does not overlap with the specified values.
+
+`Exists`: Matches if the field is present.
+
+`DoesNotExist`: Matches if the field is not present.
+
+[NOTE]
+====
+Using `Array[Index]` treats the element as the type defined for the array elements, for example, string, timestamp, number, or boolean.
+====
+|Array element
+|===
diff --git a/downstream/modules/platform/ref-enabling-automation-hub-collection-and-container-signing.adoc b/downstream/modules/platform/ref-enabling-automation-hub-collection-and-container-signing.adoc
index b0a9437429..cc7d9f9e1f 100644
--- a/downstream/modules/platform/ref-enabling-automation-hub-collection-and-container-signing.adoc
+++ b/downstream/modules/platform/ref-enabling-automation-hub-collection-and-container-signing.adoc
@@ -4,21 +4,152 @@
 :_mod-docs-content-type: REFERENCE
 [id="enabling-automation-hub-collection-and-container-signing_{context}"]
-= Enabling {HubNameStart} collection and container signing
+= Enabling automation content collection and container signing
-[role="_abstract"]
-{HubNameStart} allows you to sign Ansible collections and container images. This feature is not enabled by default, and you must provide the GPG key.
+Automation content signing is disabled by default. To enable it, the following installation variables are required in the inventory file:
+[source,yaml]
 ----
+# Collection signing
 hub_collection_signing=true
-hub_collection_signing_key=/full/path/to/collections/gpg/key
+hub_collection_signing_key=
+
+# Container signing
 hub_container_signing=true
-hub_container_signing_key=/full/path/to/containers/gpg/key
+hub_container_signing_key=
+----
+
+The following variables are required if the keys are protected by a passphrase:
+
+[source,yaml]
+----
+# Collection signing
+hub_collection_signing_pass=
+
+# Container signing
+hub_container_signing_pass=
+----
+
+The `hub_collection_signing_key` and `hub_container_signing_key` variables require keys to be set up before running an installation.
+
+Automation content signing currently only supports GnuPG (GPG) based signature keys. For more information about GPG, see the link:https://www.gnupg.org/documentation/manpage.html[GnuPG man page].
+
+[NOTE]
+====
+The algorithm and cipher used are the responsibility of the customer.
+====
+
+.Procedure
+
+. On a RHEL 9 server, run the following command to create a new key pair for collection signing:
++
+----
+gpg --gen-key
+----
++
+. Enter your information for "Real name" and "Email address":
++
+Example output:
++
+----
+gpg --gen-key
+gpg (GnuPG) 2.3.3; Copyright (C) 2021 Free Software Foundation, Inc.
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
+
+Note: Use "gpg --full-generate-key" for a full featured key generation dialog.
+
+GnuPG needs to construct a user ID to identify your key.
+
+Real name: Joe Bloggs
+Email address: jbloggs@example.com
+You selected this USER-ID:
+    "Joe Bloggs <jbloggs@example.com>"
+
+Change (N)ame, (E)mail, or (O)kay/(Q)uit? O
----
++
+If this fails, your environment does not have the necessary prerequisite packages installed for GPG. Install the necessary packages to proceed.
++
+. A dialog box appears and asks you for a passphrase. Setting one is optional but recommended.
+. The keys are then generated, producing output similar to the following:
++
+----
+We need to generate a lot of random bytes. It is a good idea to perform
+some other action (type on the keyboard, move the mouse, utilize the
+disks) during the prime generation; this gives the random number
+generator a better chance to gain enough entropy.
+gpg: key 022E4FBFB650F1C4 marked as ultimately trusted
+gpg: revocation certificate stored as '/home/aapuser/.gnupg/openpgp-revocs.d/F001B037976969DD3E17A829022E4FBFB650F1C4.rev'
+public and secret key created and signed.
-When the GPG key is protected by a passphrase, you must provide the passphrase.
+pub   rsa3072 2024-10-25 [SC] [expires: 2026-10-25]
+      F001B037976969DD3E17A829022E4FBFB650F1C4
+uid   Joe Bloggs <jbloggs@example.com>
+sub   rsa3072 2024-10-25 [E] [expires: 2026-10-25]
+----
++
+Note the expiry date, which you can set based on company standards and needs.
++
+. You can view all of your GPG keys by running the following command:
++
+----
+gpg --list-secret-keys --keyid-format=long
+----
++
+. To export the public key, run the following command:
++
+----
+gpg --export -a --output collection-signing-key.pub
+----
++
+. To export the private key, run the following command:
++
+----
+gpg -a --export-secret-keys > collection-signing-key.priv
+----
++
+. If a passphrase is detected, you are prompted to enter the passphrase.
+. To view the private key file contents, run the following command:
++
+----
+cat collection-signing-key.priv
+----
++
+Example output:
++
+----
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+
+lQWFBGcbN14BDADTg5BsZGbSGMHypUJMuzmIffzzz4LULrZA8L/I616lzpBHJvEs
+sSN6KuKY1TcIwIDCCa/U5Obm46kurpP2Y+vNA1YSEtMJoSeHeamWMDd99f49ItBp
+
+
+j920hRy/3wJGRDBMFa4mlQg=
+=uYEF
+-----END PGP PRIVATE KEY BLOCK-----
----
-hub_collection_signing_pass=
-hub_container_signing_pass=
++
+. Repeat steps 1 to 9 to create a key pair for container signing.
+. Add the following variables to the inventory file and run the installation to create the signing services:
++
+[source,yaml]
----
+# Collection signing
+hub_collection_signing=true
+hub_collection_signing_key=/home/aapuser/aap/ansible-automation-platform-containerized-setup-2.5-2/collection-signing-key.priv
+# This variable is required if the key is protected by a passphrase
+hub_collection_signing_pass=
+
+# Container signing
+hub_container_signing=true
+hub_container_signing_key=/home/aapuser/aap/ansible-automation-platform-containerized-setup-2.5-2/container-signing-key.priv
+# This variable is required if the key is protected by a passphrase
+hub_container_signing_pass=
+----
+
+[role="_additional-resources"]
+== Additional resources
+
+* For more information on working with signed containers following an installation, see link:{URLHubManagingContent}/managing-containers-hub#working-with-signed-containers[Working with signed containers] in the {TitleHubManagingContent} guide.
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-example-CONT-architecture.adoc b/downstream/modules/platform/ref-example-CONT-architecture.adoc
new file mode 100644
index 0000000000..bebd5223ad
--- /dev/null
+++ b/downstream/modules/platform/ref-example-CONT-architecture.adoc
@@ -0,0 +1,8 @@
+// This module is included in assembly-aap-architecture.adoc
+[id='example_CONT_architecture_{context}']
+= Example containerized deployment architecture
+
+The following reference architecture provides an example setup of an enterprise deployment of containerized {PlatformNameShort}.
+
+.Example enterprise containerized deployment architecture
+image::cont-b-env-a.png[Reference architecture for an example setup of an enterprise containerized {PlatformNameShort} deployment]
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-example-OCP-architecture.adoc b/downstream/modules/platform/ref-example-OCP-architecture.adoc
new file mode 100644
index 0000000000..e8b8da90d7
--- /dev/null
+++ b/downstream/modules/platform/ref-example-OCP-architecture.adoc
@@ -0,0 +1,8 @@
+// This module is included in assembly-aap-architecture.adoc
+[id='example_OCP_architecture_{context}']
+= Example Operator-based deployment architecture
+
+The following reference architecture provides an example setup of an enterprise deployment of {PlatformNameShort} on {OCPShort}.
+
+.Example enterprise Operator-based deployment architecture
+image::ocp-b-env-a.png[Reference architecture for an example setup of an enterprise Operator-based {PlatformNameShort} deployment]
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-fetching-a-monthly-report.adoc b/downstream/modules/platform/ref-fetching-a-monthly-report.adoc
new file mode 100644
index 0000000000..cbc7241e44
--- /dev/null
+++ b/downstream/modules/platform/ref-fetching-a-monthly-report.adoc
@@ -0,0 +1,106 @@
+:_newdoc-version: 2.18.3
+:_template-generated: 2024-07-15
+
+:_mod-docs-content-type: REFERENCE
+
+[id="fetching-a-monthly-report_{context}"]
+= Fetching a monthly report
+
+
+== On RHEL
+To fetch a monthly report on RHEL, run:
+
+[source,bash]
+----
+scp -r username@controller_host:$METRICS_UTILITY_SHIP_PATH/data/<year>/<month>/ /local/directory/
+----
+
+The generated report has the default name `CCSP-<year>-<month>.xlsx` and is deposited in the ship path that you specified.
+
+== On {OCPShort} from the {PlatformNameShort} operator
+
+Use the following playbook to fetch a monthly consumption report for {PlatformNameShort} on {OCPShort}:
+
+[source,yaml]
+----
+- name: Copy directory from Kubernetes PVC to local machine
+  hosts: localhost
+
+  vars:
+    report_dir_path: "/mnt/metrics/reports/{{ year }}/{{ month }}/"
+
+  tasks:
+    - name: Create a temporary pod to access PVC data
+      kubernetes.core.k8s:
+        definition:
+          apiVersion: v1
+          kind: Pod
+          metadata:
+            name: temp-pod
+            namespace: "{{ namespace_name }}"
+          spec:
+            containers:
+              - name: busybox
+                image: busybox
+                command: ["/bin/sh"]
+                args: ["-c", "sleep 3600"] # Keeps the container alive for 1 hour
+                volumeMounts:
+                  - name: "{{ pvc }}"
+                    mountPath: "/mnt/metrics"
+            volumes:
+              - name: "{{ pvc }}"
+                persistentVolumeClaim:
+                  claimName: automationcontroller-metrics-utility
+            restartPolicy: Never
+      register: pod_creation
+
+    - name: Wait for both initContainer and main container to be ready
+      kubernetes.core.k8s_info:
+        kind: Pod
+        namespace: "{{ namespace_name }}"
+        name: temp-pod
+      register: pod_status
+      until: >
+        pod_status.resources[0].status.containerStatuses[0].ready
+      retries: 30
+      delay: 10
+
+    - name: Create a tarball of the directory of the report in the container
+      kubernetes.core.k8s_exec:
+        namespace: "{{ namespace_name }}"
+        pod: temp-pod
+        container: busybox
+        command: tar czf /tmp/metrics.tar.gz -C "{{ report_dir_path }}" .
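+        # Compressing the report directory inside the pod lets the k8s_cp task
+        # below transfer a single file instead of walking the directory tree.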
+      register: tarball_creation
+
+    - name: Ensure the local directory exists
+      ansible.builtin.file:
+        path: "{{ local_dir }}"
+        state: directory
+
+    - name: Copy the report tarball from the container to the local machine
+      kubernetes.core.k8s_cp:
+        namespace: "{{ namespace_name }}"
+        pod: temp-pod
+        container: busybox
+        state: from_pod
+        remote_path: /tmp/metrics.tar.gz
+        local_path: "{{ local_dir }}/metrics.tar.gz"
+      when: tarball_creation is succeeded
+
+    - name: Extract the report tarball on the local machine
+      ansible.builtin.unarchive:
+        src: "{{ local_dir }}/metrics.tar.gz"
+        dest: "{{ local_dir }}"
+        remote_src: yes
+        extra_opts: "--strip-components=1"
+      when: tarball_creation is succeeded
+
+    - name: Delete the temporary pod
+      kubernetes.core.k8s:
+        api_version: v1
+        kind: Pod
+        namespace: "{{ namespace_name }}"
+        name: temp-pod
+        state: absent
+----
diff --git a/downstream/modules/platform/ref-gateway-controller-ext-db.adoc b/downstream/modules/platform/ref-gateway-controller-ext-db.adoc
new file mode 100644
index 0000000000..09a96d1834
--- /dev/null
+++ b/downstream/modules/platform/ref-gateway-controller-ext-db.adoc
@@ -0,0 +1,63 @@
+
+[id="ref-gateway-controller-ext-db"]
+
+= Single {Gateway} and {ControllerName} with an external (installer managed) database
+
+
+[role="_abstract"]
+Use this example to see what is minimally needed within the inventory file to deploy single instances of {Gateway} and {ControllerName} with an external (installer managed) database.
+
+-----
+[automationcontroller]
+controller.example.com
+
+[automationgateway]
+gateway.example.com
+
+[database]
+data.example.com
+
+[all:vars]
+admin_password=''
+redis_mode=standalone
+pg_host='data.example.com'
+pg_port=5432
+pg_database='awx'
+pg_username='awx'
+pg_password=''
+pg_sslmode='prefer' # set to 'verify-full' for client-side enforced SSL
+
+registry_url='registry.redhat.io'
+registry_username=''
+registry_password=''
+
+# Automation Gateway configuration
+automationgateway_admin_password=''
+
+automationgateway_pg_host='data.example.com'
+automationgateway_pg_port=5432
+
+automationgateway_pg_database='automationgateway'
+automationgateway_pg_username='automationgateway'
+automationgateway_pg_password=''
+automationgateway_pg_sslmode='prefer'
+
+# The main automation gateway URL that clients will connect to (e.g. https://).
+# If not specified, the first node in the [automationgateway] group will be used when needed.
+# automationgateway_main_url = ''
+
+# Certificate and key to install in Automation Gateway
+# automationgateway_ssl_cert=/path/to/automationgateway.cert
+# automationgateway_ssl_key=/path/to/automationgateway.key
+
+# SSL-related variables
+# If set, this will install a custom CA certificate to the system trust store.
+# custom_ca_cert=/path/to/ca.crt
+# Certificate and key to install in nginx for the web UI and API
+# web_server_ssl_cert=/path/to/tower.cert
+# web_server_ssl_key=/path/to/tower.key
+# Server-side SSL settings for PostgreSQL (when we are installing it).
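+# These settings apply only to the database that the installer deploys. To
+# verify certificates end to end, also set pg_sslmode='verify-full' above.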
+# postgres_use_ssl=False
+# postgres_ssl_cert=/path/to/pgsql.crt
+# postgres_ssl_key=/path/to/pgsql.key
+-----
diff --git a/downstream/modules/platform/ref-gateway-controller-hub-eda-ext-db.adoc b/downstream/modules/platform/ref-gateway-controller-hub-eda-ext-db.adoc
new file mode 100644
index 0000000000..a3853de2e5
--- /dev/null
+++ b/downstream/modules/platform/ref-gateway-controller-hub-eda-ext-db.adoc
@@ -0,0 +1,136 @@
+[id="ref-gateway-controller-hub-eda-ext-db"]
+
+= Single {Gateway}, {ControllerName}, {HubName}, and {EDAcontroller} with an external (installer managed) database
+
+[role="_abstract"]
+Use this example to populate the inventory file to deploy single instances of {Gateway}, {ControllerName}, {HubName}, and {EDAcontroller} with an external (installer managed) database.
+
+[IMPORTANT]
+====
+* This scenario requires a minimum of {ControllerName} 2.4 for successful deployment of {EDAcontroller}.
+
+* {EDAController} must be installed on a separate server and cannot be installed on the same host as {HubName} and {ControllerName}.
+
+* When an {EDAName} rulebook is activated under standard conditions, it uses approximately 250 MB of memory. However, the actual memory consumption can vary significantly based on the complexity of the rules and the volume and size of the events processed.
+In scenarios where a large number of events are anticipated or the rulebook complexity is high, conduct a preliminary assessment of resource usage in a staging environment.
+This ensures that the maximum number of activations is based on the resource capacity.
+In the following example, the default `automationedacontroller_max_running_activations` setting is 12, but you can adjust it to fit your capacity.
+
+====
+
+[literal, subs="+attributes"]
+-----
+[automationcontroller]
+controller.example.com
+
+[automationhub]
+automationhub.example.com
+
+[automationedacontroller]
+automationedacontroller.example.com
+
+[automationgateway]
+gateway.example.com
+
+[database]
+data.example.com
+
+[all:vars]
+admin_password=''
+redis_mode=standalone
+pg_host='data.example.com'
+pg_port='5432'
+pg_database='awx'
+pg_username='awx'
+pg_password=''
+pg_sslmode='prefer' # set to 'verify-full' for client-side enforced SSL
+
+registry_url='registry.redhat.io'
+registry_username=''
+registry_password=''
+
+# {HubNameStart} configuration
+
+automationhub_admin_password=
+
+automationhub_pg_host='data.example.com'
+automationhub_pg_port=5432
+
+automationhub_pg_database='automationhub'
+automationhub_pg_username='automationhub'
+automationhub_pg_password=
+automationhub_pg_sslmode='prefer'
+
+# Automation {EDAController} configuration
+
+automationedacontroller_admin_password=''
+
+automationedacontroller_pg_host='data.example.com'
+automationedacontroller_pg_port=5432
+
+automationedacontroller_pg_database='automationedacontroller'
+automationedacontroller_pg_username='automationedacontroller'
+automationedacontroller_pg_password=''
+
+# Keystore file to install in SSO node
+# sso_custom_keystore_file='/path/to/sso.jks'
+
+# This install will deploy SSO with sso_use_https=True
+# Keystore password is required for https enabled SSO
+sso_keystore_password=''
+
+# This install will deploy a TLS enabled Automation Hub.
+# If for some reason this is not the behavior wanted one can
+# disable TLS enabled deployment.
+#
+# automationhub_disable_https = False
+# The default install will generate self-signed certificates for the Automation
If you are providing valid certificate via automationhub_ssl_cert +# and automationhub_ssl_key, one should toggle that value to True. +# +# automationhub_ssl_validate_certs = False +# SSL-related variables +# If set, this will install a custom CA certificate to the system trust store. +# custom_ca_cert=/path/to/ca.crt +# Certificate and key to install in Automation Hub node +# automationhub_ssl_cert=/path/to/automationhub.cert +# automationhub_ssl_key=/path/to/automationhub.key + +# Automation Gateway configuration +automationgateway_admin_password='' + +automationgateway_pg_host='' +automationgateway_pg_port=5432 + +automationgateway_pg_database='automationgateway' +automationgateway_pg_username='automationgateway' +automationgateway_pg_password='' +automationgateway_pg_sslmode='prefer' + +# The main automation gateway URL that clients will connect to (e.g. https://). +# If not specified, the first node in the [automationgateway] group will be used when needed. +# automationgateway_main_url = '' + +# Certificate and key to install in Automation Gateway +# automationgateway_ssl_cert=/path/to/automationgateway.cert +# automationgateway_ssl_key=/path/to/automationgateway.key + +# Certificate and key to install in nginx for the web UI and API +# web_server_ssl_cert=/path/to/tower.cert +# web_server_ssl_key=/path/to/tower.key +# Server-side SSL settings for PostgreSQL (when we are installing it). +# postgres_use_ssl=False +# postgres_ssl_cert=/path/to/pgsql.crt +# postgres_ssl_key=/path/to/pgsql.key + +# Boolean flag used to verify Automation Controller's +# web certificates when making calls from Automation {EDAcontroller}. +# automationedacontroller_controller_verify_ssl = true +# +# Certificate and key to install in Automation {EDAcontroller} node +# automationedacontroller_ssl_cert=/path/to/automationeda.crt +# automationedacontroller_ssl_key=/path/to/automationeda.key + +----- +.Additional resources +For more information about these inventory variables, refer to the link:{URLInstallationGuide}/index#ref-hub-variables[{HubNameMain} variables]. \ No newline at end of file diff --git a/downstream/modules/platform/ref-gateway-controller-hub-ext-db.adoc b/downstream/modules/platform/ref-gateway-controller-hub-ext-db.adoc new file mode 100644 index 0000000000..e50d907c4e --- /dev/null +++ b/downstream/modules/platform/ref-gateway-controller-hub-ext-db.adoc @@ -0,0 +1,88 @@ +[id="ref-gateway-controller-hub-ext-db"] + += Single {Gateway}, {ControllerName}, and {HubName} with an external (installer managed) database + +[role="_abstract"] +Use this example to populate the inventory file to deploy single instances of {Gateway}, {ControllerName}, and {HubName} with an external (installer managed) database. + +----- +[automationcontroller] +controller.example.com + +[automationhub] +automationhub.example.com + +[automationgateway] +gateway.example.com + +[database] +data.example.com + +[all:vars] +admin_password='' +redis_mode=standalone +pg_host='data.example.com' +pg_port='5432' +pg_database='awx' +pg_username='awx' +pg_password='' +pg_sslmode='prefer' # set to 'verify-full' for client-side enforced SSL + +registry_url='registry.redhat.io' +registry_username='' +registry_password='' + +automationhub_admin_password= + +automationhub_pg_host='data.example.com' +automationhub_pg_port=5432 + +automationhub_pg_database='automationhub' +automationhub_pg_username='automationhub' +automationhub_pg_password= +automationhub_pg_sslmode='prefer' + +# The default install will deploy a TLS enabled Automation Hub. 
+# If for some reason this is not the behavior wanted one can +# disable TLS enabled deployment. +# +# automationhub_disable_https = False +# The default install will generate self-signed certificates for the Automation +# Hub service. If you are providing valid certificate via automationhub_ssl_cert +# and automationhub_ssl_key, one should toggle that value to True. +# +# automationhub_ssl_validate_certs = False +# SSL-related variables +# If set, this will install a custom CA certificate to the system trust store. +# custom_ca_cert=/path/to/ca.crt +# Certificate and key to install in Automation Hub node +# automationhub_ssl_cert=/path/to/automationhub.cert +# automationhub_ssl_key=/path/to/automationhub.key + +# Automation Gateway configuration +automationgateway_admin_password='' + +automationgateway_pg_host='' +automationgateway_pg_port=5432 + +automationgateway_pg_database='automationgateway' +automationgateway_pg_username='automationgateway' +automationgateway_pg_password='' +automationgateway_pg_sslmode='prefer' + +# The main automation gateway URL that clients will connect to (e.g. https://). +# If not specified, the first node in the [automationgateway] group will be used when needed. +# automationgateway_main_url = '' + +# Certificate and key to install in Automation Gateway +# automationgateway_ssl_cert=/path/to/automationgateway.cert +# automationgateway_ssl_key=/path/to/automationgateway.key + +# Certificate and key to install in nginx for the web UI and API +# web_server_ssl_cert=/path/to/tower.cert +# web_server_ssl_key=/path/to/tower.key +# Server-side SSL settings for PostgreSQL (when we are installing it). +# postgres_use_ssl=False +# postgres_ssl_cert=/path/to/pgsql.crt +# postgres_ssl_key=/path/to/pgsql.key +----- diff --git a/downstream/modules/platform/ref-gateway-system-requirements.adoc b/downstream/modules/platform/ref-gateway-system-requirements.adoc new file mode 100644 index 0000000000..a62309172d --- /dev/null +++ b/downstream/modules/platform/ref-gateway-system-requirements.adoc @@ -0,0 +1,5 @@ +[id="ref-gateway-system-requirements"] + += {GatewayStart} system requirements + +The {Gateway} is the service that handles authentication and authorization for {PlatformNameShort}. It provides a single entry into the platform and serves the platform's user interface. \ No newline at end of file diff --git a/downstream/modules/platform/ref-gateway-variables.adoc b/downstream/modules/platform/ref-gateway-variables.adoc new file mode 100644 index 0000000000..eafa595f1a --- /dev/null +++ b/downstream/modules/platform/ref-gateway-variables.adoc @@ -0,0 +1,162 @@ + +[id="ref-gateway-variables"] += {GatewayStart} variables + +[cols="50%,50%,50%",options="header"] +|==== +| *RPM variable name* | *Container variable name* | *Description* +| `automationgateway_admin_email` | `gateway_admin_email` | The email address used for the admin user for {Gateway}. + +| `automationgateway_admin_password` | `gateway_admin_password` | _Required_ + +The admin password used to connect to the {Gateway} instance. + +Passwords must be enclosed in quotes when they are provided in plain text in the `inventory` file. + +Use of special characters for this variable is limited. The password can include any printable ASCII character except `/`, `”`, or `@`. + +| `automationgateway_admin_username` | `gateway_admin_user` | _Optional_ + +The username used to identify and create the admin superuser in {Gateway}. 
+
+Default = `admin`
+
+| `automationgateway_disable_hsts` | `gateway_nginx_disable_hsts` | _Optional_
+
+Disable NGINX HSTS.
+
+Default = `false`
+
+| `automationgateway_disable_https` | `gateway_nginx_disable_https` | _Optional_
+
+Disable NGINX HTTPS.
+
+Default = `false`
+
+| `automationgateway_grpc_auth_service_timeout` | `gateway_grpc_auth_service_timeout` | {GatewayStart} auth server timeout.
+
+Default = `30s`
+
+| `automationgateway_grpc_server_max_threads_per_process` | `gateway_grpc_server_max_threads_per_process` | {GatewayStart} auth server threads per process.
+
+Default = `10`
+
+| `automationgateway_grpc_server_processes` | `gateway_grpc_server_processes` | {GatewayStart} auth server processes.
+
+Default = `5`
+
+| `automationgateway_main_url` | `gateway_main_url` | _Optional_
+
+The main {Gateway} URL that clients will connect to (e.g. `\https://`).
+
+If not specified, the first node in the `[automationgateway]` group will be used when needed.
+
+| `automationgateway_pg_cert_auth` | `gateway_pg_cert_auth` | Set this variable to `true` to enable client certificate authentication.
+
+Default = `false`
+
+| `automationgateway_pg_database` | `gateway_pg_database` | The name of the PostgreSQL database used by {Gateway}.
+
+RPM default = `automationgateway`
+
+Container default = `gateway`
+
+| `automationgateway_pg_host` | `gateway_pg_host` | _Required_
+
+The hostname of the PostgreSQL database used by {Gateway}.
+
+Default = `127.0.0.1`
+
+| `automationgateway_pg_password` | `gateway_pg_password` | Required if not using client certificate authentication.
+
+The password for the {Gateway} PostgreSQL database.
+
+Use of special characters for this variable is limited. The `!`, `#`, `0` and `@` characters are supported. Use of other special characters can cause the setup to fail.
+
+| `automationgateway_pg_port` | `gateway_pg_port` | Required if not using an internal database.
+
+The port number of the PostgreSQL database used by {Gateway}.
+
+Default = `5432`
+
+| `automationgateway_pg_sslmode` | `gateway_pg_sslmode` | Determines the level of encryption and authentication for client-server connections.
+
+Valid options include `verify-full`, `verify-ca`, `require`, `prefer`, `allow`, `disable`.
+
+Default = `prefer`
+
+| `automationgateway_pg_username` | `gateway_pg_username` | The username for the {Gateway} PostgreSQL database.
+
+RPM default = `automationgateway`
+
+Container default = `gateway`
+
+| `automationgateway_pgclient_sslcert` | `gateway_pg_tls_cert` | Required if using client certificate authentication.
+
+Path to the PostgreSQL SSL/TLS certificate file for {Gateway}.
+
+| `automationgateway_pgclient_sslkey` | `gateway_pg_tls_key` | Required if using client certificate authentication.
+
+Path to the PostgreSQL SSL/TLS key file for {Gateway}.
+
+| `automationgateway_redis_host` | `gateway_redis_host` | The Redis hostname used by {Gateway}.
+
+| `automationgateway_redis_port` | `gateway_redis_port` | The Redis {Gateway} port.
+
+Default = `6379`
+
+| `automationgateway_ssl_cert` | `gateway_tls_cert` | _Optional_
+
+Path to the SSL/TLS certificate file for {Gateway}.
+
+| `automationgateway_ssl_key` | `gateway_tls_key` | _Optional_
+
+Path to the SSL/TLS key file for {Gateway}.
+
+| | `gateway_nginx_client_max_body_size` | NGINX maximum body size.
+
+Default = `5m`
+
+| | `gateway_nginx_hsts_max_age` | NGINX HSTS maximum age.
+
+Default = `63072000`
+
+| | `gateway_nginx_http_port` | NGINX HTTP port.
+
+| | `gateway_nginx_https_port` | NGINX HTTPS port.
+
+| | `gateway_nginx_https_protocols` | NGINX HTTPS protocols.
+
+Default = `[TLSv1.2, TLSv1.3]`
+
+| | `gateway_nginx_user_headers` | Custom NGINX headers.
+
+| | `gateway_redis_disable_tls` | Disable TLS Redis.
+
+Default = `false`
+
+| | `gateway_redis_password` | Redis {Gateway} password.
+
+| | `gateway_redis_tls_cert` | _Optional_
+
+Path to the {Gateway} Redis certificate file.
+
+| | `gateway_redis_tls_key` | _Optional_
+
+Path to the {Gateway} Redis key file.
+
+| | `gateway_redis_username` | Redis {Gateway} username.
+
+Default = `gateway`
+
+| | `gateway_secret_key` | The secret key value used by {Gateway} to sign and encrypt data, ensuring secure communication and data integrity between services.
+
+| | `gateway_tls_remote` | {GatewayStart} TLS remote files.
+
+Default = `false`
+
+| | `gateway_uwsgi_listen_queue_size` | {GatewayStart} uWSGI listen queue size.
+
+Default = `4096`
+
+|====
diff --git a/downstream/modules/platform/ref-general-inventory-variables.adoc b/downstream/modules/platform/ref-general-inventory-variables.adoc
index 0b10521d27..465f5c60ab 100644
--- a/downstream/modules/platform/ref-general-inventory-variables.adoc
+++ b/downstream/modules/platform/ref-general-inventory-variables.adoc
@@ -1,44 +1,139 @@
-[id="ref-genera-inventory-variables"]
+[id="ref-general-inventory-variables"]
 
 = General variables
-[cols="50%,50%",options="header"]
+[cols="50%,50%,50%",options="header"]
 |====
-| *Variable* | *Description*
-| *`enable_insights_collection`* | The default install registers the node to the {InsightsName} Service if the node is registered with Subscription Manager.
-Set to `False` to disable.
+| *RPM variable name* | *Container variable name* | *Description*
+| `aap_ca_cert_file` | `ca_tls_cert` | Define a Certification Authority certificate along with a matching key when you want the installation program to create leaf certificates for each product for you.
+
+| `aap_ca_cert_files_remote` | `ca_tls_remote` | Denote whether the CA-provided certificate files are local to the installation program (`false`) or on the remote component server (`true`).
+
+Default = `false`
+
+| `aap_ca_key_file` | `ca_tls_key` | Define the key for the matching certificate when you want the installation program to create leaf certificates for each product for you.
+
+| `bundle_install` | `bundle_install` | Set to `true` to enable a bundled installation.
+
+Default = `false`
+
+| `bundle_install_folder` | `bundle_dir` | Specify the path to the bundle directory when performing a bundle install.
+
+Container default = `false`
+
+RPM Default = `/var/lib/ansible-automation-platform-bundle`
+
+| `custom_ca_cert` | `custom_ca_cert` | The path to the custom CA certificate file.
+
+If set, this installs a custom CA certificate to the system truststore.
+
+| `enable_insights_collection` | | The default install registers the node to the {InsightsName} for the {PlatformName} Service if the node is registered with Subscription Manager.
+
+Set to `false` to disable.
 
 Default = `true`
-| *`nginx_user_http_config`* | List of nginx configurations for `/etc/nginx/nginx.conf` under the http section.
+
+| `nginx_tls_protocols` | | Defines support for `ssl_protocols` in NGINX.
+
+Available values: `TLSv1`, `TLSv1.1`, `TLSv1.2`, `TLSv1.3`.
+
+The `TLSv1.1` and `TLSv1.2` parameters only work when OpenSSL 1.0.1 or higher is used.
+
+The `TLSv1.3` parameter only works when OpenSSL 1.1.1 or higher is used.
+
+If `nginx_tls_protocols = ['TLSv1.3']`, only `TLSv1.3` is enabled.
To set more than one protocol, use `nginx_tls_protocols = ['TLSv1.2', 'TLSv1.3']`.
+
+Default = `TLSv1.2`
+
+| `nginx_user_http_config` | | List of NGINX configurations for `/etc/nginx/nginx.conf` under the `http` section.
 
 Each element in the list is provided into `http nginx config` as a separate line.
-Default = empty list
-| *`registry_password`* | `registry_password` is only required if a non-bundle installer is used.
+Default = {}
+
+| `redis_cluster_ip` | `redis_cluster_ip` | The IPv4 address used by the Redis cluster to identify each host in the cluster.
+
+Redis clusters cannot use hostnames or IPv6 addresses. When defining hosts in the `[redis]` group, use this variable to identify the IPv4 address if the default is not what you want.
+
+| `redis_mode` | `redis_mode` | The Redis mode to use for your {PlatformNameShort} installation.
+
+Possible values are `standalone` and `cluster`.
+
+For more information about Redis, see link:{URLPlanningGuide}/ha-redis_planning[Caching and queueing system] in _{TitlePlanningGuide}_.
-Password credential for access to `registry_url`.
+Default = `cluster`
-Used for both `[automationcontroller]` and `[automationhub]` groups.
+| `registry_password` | `registry_password` | Required if performing an online non-bundled installation.
-Enter your Red Hat Registry Service Account credentials in `registry_username` and `registry_password` to link to the Red Hat container registry.
+The password credential for access to the registry source defined in `registry_url`.
-When `registry_url` is `registry.redhat.io`, username and password are required if not using a bundle installer.
-| *`registry_url`* | Used for both `[automationcontroller]` and `[automationhub]` groups.
+// This content is used in RPM installation
+ifdef::aap-install[]
+For more information, see link:{URLInstallationGuide}/assembly-platform-install-scenario#proc-set-registry-username-password[Setting registry_username and registry_password].
+endif::aap-install[]
+// This content is used in Containerized installation
+ifdef::container-install[]
+For more information, see link:{URLContainerizedInstall}/aap-containerized-installation#proc-set-registry-username-password[Setting registry_username and registry_password].
+endif::container-install[]
-Default = `registry.redhat.io`.
-| *`registry_username`* | `registry_username` is only required if a non-bundle installer is used.
+| `registry_url` | `registry_url` | URL for the registry source.
-User credential for access to `registry_url`.
+Default = `registry.redhat.io`
-Used for both `[automationcontroller]` and `[automationhub]` groups, but only if the value of `registry_url` is `registry.redhat.io`.
+| `registry_username` | `registry_username` | Required if performing an online non-bundled installation.
-Enter your Red Hat Registry Service Account credentials in `registry_username` and `registry_password` to link to the Red Hat container registry.
+The username credential for access to the registry source defined in `registry_url`.
+ +// This content is used in RPM installation +ifdef::aap-install[] +For more information, see link:{URLInstallationGuide}/assembly-platform-install-scenario#proc-set-registry-username-password[Setting registry_username and registry_password]. +endif::aap-install[] +// This content is used in Containerized installation +ifdef::container-install[] +For more information, see link:{URLContainerizedInstall}/aap-containerized-installation#proc-set-registry-username-password[Setting registry_username and registry_password]. +endif::container-install[] + +| `registry_verify_ssl` | `registry_tls_verify` | Controls whether SSL/TLS certificate verification should be enabled or disabled when making HTTPS requests. + +Default = `true` + +| `routable_hostname` |`routable_hostname` | This variable is used if the machine running the installation program can only route to the target host through a specific URL. For example, if you use short names in your inventory, but the node running the installation program can only resolve that host by using a FQDN. If `routable_hostname` is not set, it should default to `ansible_host`. If you do not set `ansible_host`, `inventory_hostname` is used as a last resort. This variable is used as a host variable for particular hosts and not under the `[all:vars]` section. -For further information, see link:https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html#assigning-a-variable-to-one-machine-host-variables[Assigning a variable to one machine:host variables]. + +For further information, see link:https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html#assigning-a-variable-to-one-machine-host-variables[Assigning a variable to one machine: host variables]. + +|| `backup_dir` | The location of the backup directory on the Ansible host. Used when performing backup and restore. + +Default = `~/backups` + + +| | `container_compress` | Container compression software. + +Default = `gzip` + +| | `container_keep_images` | Keep container images. + +Default = `false` + +| | `container_pull_images` | Pull newer container images. + +Default = `true` + +| | `registry_auth` | Use registry authentication. + +Default = `true` + +| | `registry_ns_aap` | {PlatformNameShort} registry namespace. + +Default = `ansible-automation-platform-25` + +| | `registry_ns_rhel` | RHEL registry namespace. + +Default = `rhel8` + |==== diff --git a/downstream/modules/platform/ref-get-started-credential-types.adoc b/downstream/modules/platform/ref-get-started-credential-types.adoc index 857820a4fe..07abd1be89 100644 --- a/downstream/modules/platform/ref-get-started-credential-types.adoc +++ b/downstream/modules/platform/ref-get-started-credential-types.adoc @@ -2,17 +2,20 @@ = Getting started with credential types //[ddacosta] Consider rewriting this as a procedure. -From the navigation panel, select {MenuAMCredentialType}. -If no custom credential types have been created, the *Credential Types* prompts you to add one. +.Procedure +. From the navigation panel, select {MenuAECredentials}. +If no custom credential types have been created, the *Credential Types* page prompts you to add one. ++ //image:credential-types-home-empty.png[Credential Types - empty] - ++ If credential types have been created, this page displays a list of existing and available Credential Types. 
- ++ //image:credential-types-home-with-example-types.png[Credential Types - example credential types] -To view more information about a credential type, click the name of a credential or the Edit image:leftpencil.png[Edit, 15,15] icon. +. Select the name of a credential or the Edit image:leftpencil.png[Edit, 15,15] icon to view more information about a credential type. -Each credential type displays its own unique configurations in the *Input Configuration* field and the *Injector Configuration* field, if -applicable. +. On the *Details* tab, each credential type displays its own unique configurations in the *Input Configuration* field and the *Injector Configuration* field, if applicable. Both YAML and JSON formats are supported in the configuration fields. + +//NOTE The Back to Credential Types Tab throws an error. diff --git a/downstream/modules/platform/ref-gs-install-config.adoc b/downstream/modules/platform/ref-gs-install-config.adoc new file mode 100644 index 0000000000..70e61cb777 --- /dev/null +++ b/downstream/modules/platform/ref-gs-install-config.adoc @@ -0,0 +1,11 @@ +[id="ref-gs-install-config"] + += {PlatformNameShort} installation and configuration + +{PlatformName} offers flexible installation and configuration options. +Depending on your organization's needs, you can install {PlatformName} using one of the following methods, based on your environment: + +* link:{LinkInstallationGuide} +* link:{LinkOperatorInstallation} +* link:{BaseURL}/ansible_on_clouds/2.x[Cloud environments] +* link:{LinkContainerizedInstall} diff --git a/downstream/modules/platform/ref-guidelines-hosts-groups.adoc b/downstream/modules/platform/ref-guidelines-hosts-groups.adoc index 7ac74f44c8..33087f0666 100644 --- a/downstream/modules/platform/ref-guidelines-hosts-groups.adoc +++ b/downstream/modules/platform/ref-guidelines-hosts-groups.adoc @@ -6,6 +6,10 @@ * When using an external database, ensure the `[database]` sections of your inventory file are properly set up. * To improve performance, do not colocate the database and the {ControllerName} on the same server. +[IMPORTANT] +==== +When using an external database with {PlatformNameShort}, you must create and maintain that database. Ensure that you clear your external database when uninstalling {PlatformNameShort}. A minimal example inventory layout for an external database is shown after these guidelines. +==== .{HubNameStart} * If there is an `[automationhub]` group, you must include the variables `automationhub_pg_host` and `automationhub_pg_port`. @@ -13,7 +17,7 @@ * Do not install {HubNameMain} and {ControllerName} on the same node. * Provide a reachable IP address or fully qualified domain name (FQDN) for the `[automationhub]` and `[automationcontroller]` hosts to ensure that users can synchronize and install content from {HubNameMain} and {ControllerName} from a different node. + -The FQDN must not contain either the `-` or the `_` symbols, as it will not be processed correctly. +The FQDN must not contain the `_` symbol, as it will not be processed correctly in Skopeo. You may use the `-` symbol, as long as it is not at the start or the end of the host name. + Do not use `localhost`. @@ -31,11 +35,14 @@ If you use one value in `[database]` and both {ControllerName} and {HubNameMain} .{ControllerNameStart} * {ControllerNameStart} does not configure replication or failover for the database that it uses. -* {ControllerName} works with any replication that you have. +* {ControllerNameStart} works with any replication that you have.
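+
+As an illustration of the external database guidelines above, the following is a minimal sketch of an inventory layout. The host names and values here are placeholders for illustration only, not defaults or recommendations:
+
+----
+[automationhub]
+hub.example.com
+
+[database]
+db.example.com
+
+[all:vars]
+automationhub_pg_host=db.example.com
+automationhub_pg_port=5432
+----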
.{EDAcontroller} * {EDAcontroller} must be installed on a separate server and cannot be installed on the same host as {HubName} and {ControllerName}. +.{GatewayStart} +* The {Gateway} is the service that handles authentication and authorization for {PlatformNameShort}. It provides a single entry into the platform and serves the platform’s user interface. + .Clustered installations * When upgrading an existing cluster, you can also reconfigure your cluster to omit existing instances or instance groups. Omitting the instance or the instance group from the inventory file is not enough to remove them from the cluster. diff --git a/downstream/modules/platform/ref-gw-access-rules-apps-tokens.adoc b/downstream/modules/platform/ref-gw-access-rules-apps-tokens.adoc new file mode 100644 index 0000000000..45f9b1abf6 --- /dev/null +++ b/downstream/modules/platform/ref-gw-access-rules-apps-tokens.adoc @@ -0,0 +1,24 @@ +[id="ref-gw-access-rules-apps-tokens"] + += Access rules for applications and tokens + +Access rules for applications are as follows: + +* Platform administrators can view and manipulate all applications in the system. +//[ddacosta-aap-38726] Org administrators do not have this access in gateway. +//* Organization administrators can view and manipulate all applications belonging to organization members. +//* Other users can only view, update, and delete their own applications, but cannot create any new applications. +* Platform auditors can only view applications in the system. + +Tokens, on the other hand, are resources used to authenticate incoming requests and mask the permissions of the underlying user. + +Access rules for tokens are as follows: + +* Users can create personal access tokens for themselves. +* Platform administrators are able to view and manipulate every token in the system. +//[ddacosta-aap-38726] Org administrators do not have this access in gateway. +//* Organization administrators are able to view and manipulate all tokens belonging to organization members. +* Platform auditors can only view tokens in the system. +* Other normal users are only able to view and manipulate their own tokens. + +[NOTE] +==== +Users can view the token value or the refresh token value only at the time of creation. +==== \ No newline at end of file diff --git a/downstream/modules/platform/ref-gw-application-functions.adoc b/downstream/modules/platform/ref-gw-application-functions.adoc new file mode 100644 index 0000000000..b499932a14 --- /dev/null +++ b/downstream/modules/platform/ref-gw-application-functions.adoc @@ -0,0 +1,17 @@ +[id="ref-gw-application-functions"] + += Application functions + +Several OAuth 2 utilities are available for authorization, token refresh, and revocation. +You can specify the following grant types when creating an application: + +Password:: This grant type is ideal for users who have native access to the web application and must be used when the client is the resource owner. +Authorization code:: This grant type should be used when access tokens must be issued directly to an external application or service. + +[NOTE] +==== +You can only use the authorization code type to acquire an access token when using an application. When integrating an external web application with {PlatformNameShort}, that web application might need to create OAuth2 tokens on behalf of users in that other web application.
Creating an application in the platform with the authorization code grant type is the preferred way to do this because: + +* This allows an external application to obtain a token from {PlatformNameShort} for a user, using their credentials. +* Compartmentalized tokens issued for a particular application enable those tokens to be easily managed. For example, you can revoke _all_ tokens associated with that application without having to revoke all tokens in the system. +==== diff --git a/downstream/modules/platform/ref-gw-request-token-after-expiration.adoc b/downstream/modules/platform/ref-gw-request-token-after-expiration.adoc new file mode 100644 index 0000000000..07df4682e8 --- /dev/null +++ b/downstream/modules/platform/ref-gw-request-token-after-expiration.adoc @@ -0,0 +1,19 @@ +[id="gw-request-token-after-expiration"] + += Requesting an access token after expiration + +The *Gateway access token expiration* defaults to 600 seconds (10 minutes). + +The best way to set up application integrations using the *Authorization code* grant type is to allowlist the origins for those cross-site requests. More generally, you must allowlist the service or application you are integrating with the platform, for which you want to provide access tokens. + +To do this, have your administrator add this allowlist to their local {PlatformNameShort} settings file: + +---- +CORS_ORIGIN_ALLOW_ALL = True +CORS_ALLOWED_ORIGIN_REGEXES = [ + r"http://django-oauth-toolkit.herokuapp.com*", + r"http://www.example.com*" +] +---- + +Where `http://django-oauth-toolkit.herokuapp.com` and `http://www.example.com` are applications requiring tokens with which to access the platform. diff --git a/downstream/modules/platform/ref-ha-hub-reqs.adoc b/downstream/modules/platform/ref-ha-hub-reqs.adoc index 67bd7443d6..b4ac439d89 100644 --- a/downstream/modules/platform/ref-ha-hub-reqs.adoc +++ b/downstream/modules/platform/ref-ha-hub-reqs.adoc @@ -2,16 +2,18 @@ = High availability {HubName} requirements -Before deploying a high availability (HA) {HubName}, ensure that you have a shared filesystem installed in your environment and that you have configured your network storage system, if applicable. +Before deploying a high availability (HA) {HubName}, ensure that you have a shared storage file system installed in your environment and that you have configured your network storage system, if applicable. -== Required shared filesystem +== Required shared storage -A high availability {HubName} requires you to have a shared file system, such as NFS, already installed in your environment. Before you run the {PlatformName} installer, verify that you installed the `/var/lib/pulp` directory across your cluster as part of the shared file system installation. +Shared storage is required when installing more than one {HubNameStart} with a `file` storage backend. The supported shared storage type for RPM-based installations is Network File System (NFS). + +Before you run the {PlatformName} installer, verify that you installed the `/var/lib/pulp` directory across your cluster as part of the shared storage file system installation. The {PlatformName} installer returns an error if `/var/lib/pulp` is not detected in one of your nodes, causing your high availability {HubName} setup to fail. If you receive an error stating `/var/lib/pulp` is not detected in one of your nodes, ensure `/var/lib/pulp` is properly mounted in all servers and re-run the installer.
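+
+As an example, `/var/lib/pulp` is often provided by mounting the NFS share on each {HubName} node. The following `/etc/fstab` sketch assumes an NFS server named `nfs.example.com` exporting `/exports/pulp`; both names are placeholders for illustration, so substitute the values for your storage system:
+
+----
+# Example only: mount the shared NFS export at /var/lib/pulp on every node
+nfs.example.com:/exports/pulp  /var/lib/pulp  nfs  defaults,_netdev  0 0
+----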
-== Installing firewalld for network storage +== Installing firewalld for HA hub deployment If you intend to install an HA {HubName} using network storage on the {HubName} nodes themselves, you must first install and use `firewalld` to open the necessary ports as required by your shared storage system before running the {PlatformNameShort} installer. diff --git a/downstream/modules/platform/ref-hub-variables.adoc b/downstream/modules/platform/ref-hub-variables.adoc index dbbb8aabd7..063d902683 100644 --- a/downstream/modules/platform/ref-hub-variables.adoc +++ b/downstream/modules/platform/ref-hub-variables.adoc @@ -1,384 +1,326 @@ [id="ref-hub-variables"] -= {HubNameMain} variables += {HubNameStart} variables -[cols="50%,50%",options="header"] +[cols="50%,50%,50%",options="header"] |==== -| *Variable* | *Description* -| *`automationhub_admin_password`* | Required +| *RPM variable name* | *Container variable name* | *Description* +| `automationhub_admin_password` | `hub_admin_password` | _Required_ -Passwords must be enclosed in quotes when they are provided in plain text in the inventory file. -| *`automationhub_api_token`* a| If upgrading from {PlatformNameShort} 2.0 or earlier, you must either: +Passwords must be enclosed in quotes when they are provided in plain text in the inventory file. -* provide an existing {HubNameMain} token as `automationhub_api_token`, or +Use of special characters for this variable is limited. The password can include any printable ASCII character except `/`, `"`, or `@`. -* set `generate_automationhub_token` to `true` to generate a new token +| `automationhub_api_token` | | This variable can be used to provide the installation program with an existing token. -Generating a new token invalidates the existing token. -| *`automationhub_authentication_backend`* a| This variable is not set by default. -Set it to `ldap` to use LDAP authentication. +For example, regenerating a token in the {HubName} UI invalidates the existing token. Use `automationhub_api_token` to pass the regenerated token to the installation program the next time you run it. -When this is set to `ldap`, you must also set the following variables: +| `automationhub_auto_sign_collections` | `hub_collection_auto_sign` | If a collection signing service is enabled, collections are not signed automatically by default. -* `automationhub_ldap_server_uri` -* `automationhub_ldap_bind_dn` -* `automationhub_ldap_bind_password` -* `automationhub_ldap_user_search_base_dn` -* `automationhub_ldap_group_search_base_dn` - -If any of these are absent, the installation will be halted. +Setting this parameter to `true` signs them by default. -| *`automationhub_auto_sign_collections`* | If a collection signing service is enabled, collections are not signed automatically by default. +Default = `false` -Setting this parameter to `true` signs them by default. +| `automationhub_backup_collections` | | _Optional_ -Default = `false`. -| *`automationhub_backup_collections`* | _Optional_ +{HubNameMain} provides artifacts in `/var/lib/pulp`. {ControllerNameStart} automatically backs up the artifacts by default. -{HubNameMain} provides artifacts in `/var/lib/pulp`. -{ControllerNameStart} automatically backs up the artifacts by default. +You can also set `automationhub_backup_collections` to `false` and the backup and restore process will not back up or restore `/var/lib/pulp`. -You can also set `automationhub_backup_collections` to false and the backup/restore process does not then backup or restore `/var/lib/pulp`.
+Default = `true` -Default = `true`. -| *`automationhub_collection_download_count`* | _Optional_ +| `automationhub_collection_download_count` | | _Optional_ Determines whether download count is displayed on the UI. -Default = `false`. -| *`automationhub_collection_seed_repository`* a| When you run the bundle installer, validated content is uploaded to the `validated` repository, and certified content is uploaded to the `rh-certified` repository. +Default = `false` + +| `automationhub_collection_seed_repository` | | When you run the bundle installer, validated content is uploaded to the `validated` repository, and certified content is uploaded to the `rh-certified` repository. By default, both certified and validated content are uploaded. -Possible values of this variable are 'certified' or 'validated'. +Possible values of this variable are `certified` or `validated`. If you do not want to install content, set `automationhub_seed_collections` to `false` to disable the seeding. If you only want one type of content, set `automationhub_seed_collections` to `true` and `automationhub_collection_seed_repository` to the type of content you do want to include. -| *`automationhub_collection_signing_service_key`* | If a collection signing service is enabled, you must provide this variable to ensure that collections can be properly signed. -`/absolute/path/to/key/to/sign` -| *`automationhub_collection_signing_service_script`* | If a collection signing service is enabled, you must provide this variable to ensure that collections can be properly signed. +| `automationhub_collection_signing_service_key` | `hub_collection_signing_key` | Required when a collection signing service is enabled to ensure that collections can be properly signed. + +The path to the collection signing key file. + +| `automationhub_collection_signing_service_script` | | If a collection signing service is enabled, you must provide this variable to ensure that collections can be properly signed. `/absolute/path/to/script/that/signs` -| *`automationhub_create_default_collection_signing_service`* | Set this variable to true to create a collection signing service. -Default = `false`. -| *`automationhub_container_signing_service_key`* | If a container signing service is enabled, you must provide this variable to ensure that containers can be properly signed. +| `automationhub_container_signing_service_key` | `hub_container_signing_key` | Required when a container signing service is enabled to ensure that containers can be properly signed. -`/absolute/path/to/key/to/sign` -| *`automationhub_container_signing_service_script`* | If a container signing service is enabled, you must provide this variable to ensure that containers can be properly signed. +The path to the container signing key file. + +| `automationhub_container_signing_service_script` | | If a container signing service is enabled, you must provide this variable to ensure that containers can be properly signed. `/absolute/path/to/script/that/signs` -| *`automationhub_create_default_container_signing_service`* | Set this variable to true to create a container signing service. -Default = `false`. -| *`automationhub_disable_hsts`* | The default installation deploys a TLS enabled {HubNameMain}. +| `automationhub_create_default_collection_signing_service` | `hub_collection_signing` | Set this variable to `true` to enable a collection signing service. 
+ +Default = `false` + +| `automationhub_create_default_container_signing_service` | `hub_container_signing` | Set this variable to `true` to enable a container signing service. + +Default = `false` + +| `automationhub_disable_hsts` | `hub_nginx_disable_hsts` | The default installation deploys a TLS enabled {HubName}. Use this variable if you deploy {HubName} with _HTTP Strict Transport Security_ (HSTS) web-security policy enabled. -This variable disables, the HSTS web-security policy mechanism. +This variable disables the HSTS web-security policy mechanism. + +Default = `false` -Default = `false`. -| *`automationhub_disable_https`* | _Optional_ +| `automationhub_disable_https` | `hub_nginx_disable_https` | _Optional_ -If {HubNameMain} is deployed with HTTPS enabled. +Set this variable to `true` to deploy {HubName} with HTTPS disabled. -Default = `false`. -| *`automationhub_enable_api_access_log`* | When set to `true`, this variable creates a log file at `/var/log/galaxy_api_access.log` that logs all user actions made to the platform, including their username and IP address. +Default = `false` -Default = `false`. -| *`automationhub_enable_analytics`* | A Boolean indicating whether to enable pulp analytics for the version of pulpcore used in {HubName} in {PlatformNameShort} {PlatformVers}. +| `automationhub_enable_analytics` | | A Boolean indicating whether to enable pulp analytics for the version of `pulpcore` used in {HubName} in {PlatformNameShort} {PlatformVers}. -To enable pulp analytics, set `automationhub_enable_analytics` to true. +To enable pulp analytics, set `automationhub_enable_analytics` to `true`. -Default = `false`. -| *`automationhub_enable_unauthenticated_collection_access`* | Set this variable to true to enable unauthorized users to view collections. +Default = `false` -Default = `false`. -| *`automationhub_enable_unauthenticated_collection_download`* | Set this variable to true to enable unauthorized users to download collections. +| `automationhub_enable_api_access_log` | | When set to `true`, this variable creates a log file at `/var/log/galaxy_api_access.log` that logs all user actions made to the platform, including their username and IP address. -Default = `false`. -| *`automationhub_importer_settings`* | _Optional_ +Default = `false` -Dictionary of setting to pass to galaxy-importer. +| `automationhub_enable_unauthenticated_collection_access` | | Set this variable to `true` to enable unauthorized users to view collections. -At import time collections can go through a series of checks. +Default = `false` + +| `automationhub_enable_unauthenticated_collection_download` | | Set this variable to `true` to enable unauthorized users to download collections. + +Default = `false` + +| `automationhub_importer_settings` | `hub_galaxy_importer` | _Optional_ + +Dictionary of settings to pass to `galaxy-importer`. At import time, collections can go through a series of checks. Behavior is driven by `galaxy-importer.cfg` configuration. Examples are `ansible-doc`, `ansible-lint`, and `flake8`. This parameter enables you to drive this configuration. -| *`automationhub_main_url`* | The main {HubName} URL that clients connect to. + +| `automationhub_main_url` | `hub_main_url` | The main {HubName} URL that clients connect to. For example, \https://. Use `automationhub_main_url` to specify the main {HubName} URL that clients connect to if you are implementing {RHSSO} on your {HubName} environment. If not specified, the first node in the `[automationhub]` group is used.
-| *`automationhub_pg_database`* | _Required_ -The database name. +| `automationhub_pg_cert_auth` | `hub_pg_cert_auth` | Set this variable to `true` to enable client certificate authentication. -Default = `automationhub`. -| *`automationhub_pg_host`* | Required if not using an internal database. +Default = `false` -The hostname of the remote PostgreSQL database used by {HubName}. +| `automationhub_pg_database` | `hub_pg_database` | The name of the PostgreSQL database used by {HubName}. -Default = `127.0.0.1`. -| *`automationhub_pg_password`* | The password for the {HubName} PostgreSQL database. +RPM default = `automationhub` -Use of special characters for `automationhub_pg_password` is limited. -The `!`, `#`, `0` and `@` characters are supported. -Use of other special characters can cause the setup to fail. -| *`automationhub_pg_port`* | Required if not using an internal database. +Container default = `pulp` -Default = 5432. -| *`automationhub_pg_sslmode`* | Required. +| `automationhub_pg_host` | `hub_pg_host` | _Required_ -Default = `prefer`. -| *`automationhub_pg_username`* | Required +The hostname of the PostgreSQL database used by {HubName}. -Default = `automationhub`. -| *`automationhub_require_content_approval`* | _Optional_ +Default = `127.0.0.1` -Value is `true` if {HubName} enforces the approval mechanism before collections are made available. +| `automationhub_pg_password` | `hub_pg_password` | Required if not using client certificate authentication. -By default when you upload collections to {HubName} an administrator must approve it before they are made available to the users. +The password for the {HubName} PostgreSQL database. -If you want to disable the content approval flow, set the variable to `false`. +Use of special characters for this variable is limited. The `!`, `#`, `0` and `@` characters are supported. Use of other special characters can cause the setup to fail. -Default = `true`. -| *`automationhub_seed_collections`* | A Boolean that defines whether or not preloading is enabled. +| `automationhub_pg_port` | `hub_pg_port` | Required if not using an internal database. -When you run the bundle installer, validated content is uploaded to the `validated` repository, and certified content is uploaded to the `rh-certified` repository. +The port number of the PostgreSQL database used by {HubName}. -By default, both certified and validated content are uploaded. +Default = `5432` -If you do not want to install content, set `automationhub_seed_collections` to `false` to disable the seeding. +| `automationhub_pg_sslmode` | `hub_pg_sslmode` | Determines the level of encryption and authentication for client-server connections. -If you only want one type of content, set `automationhub_seed_collections` to `true` and `automationhub_collection_seed_repository` to the type of content you do want to include. +Valid options include `verify-full`, `verify-ca`, `require`, `prefer`, `allow`, `disable`. -Default = `true`. -| *`automationhub_ssl_cert`* | _Optional_ +Default = `prefer` + +| `automationhub_pg_username` | `hub_pg_username` | The username for your {HubName} PostgreSQL database. -`/path/to/automationhub.cert` -Same as `web_server_ssl_cert` but for {HubName} UI and API. -| *`automationhub_ssl_key`* | _Optional_ +RPM default = `automationhub` -`/path/to/automationhub.key`. +Container default = `pulp` -Same as `web_server_ssl_key` but for {HubName} UI and API -| *`automationhub_ssl_validate_certs`* | For {PlatformName} 2.2 and later, this value is no longer used.
+| `automationhub_pgclient_sslcert` | `hub_pg_tls_cert` | Required if using client certificate authentication. -Set value to `true` if {HubName} must validate certificates when requesting itself because by default, {PlatformNameShort} deploys with self-signed certificates. +The path to the PostgreSQL SSL/TLS certificate file for {HubName}. -Default = `false`. -| *`automationhub_upgrade`* | *Deprecated* +| `automationhub_pgclient_sslkey` | `hub_pg_tls_key` | Required if using client certificate authentication. -For {PlatformNameShort} 2.2.1 and later, the value of this has been fixed at `true`. +The path to the PostgreSQL SSL/TLS key file for {HubName}. -{HubNameStart} always updates with the latest packages. -| *`automationhub_user_headers`* | List of nginx headers for {HubNameMain}'s web server. +| `automationhub_require_content_approval` | | _Optional_ -Each element in the list is provided to the web server's nginx configuration as a separate line. +Value is `true` if {HubName} enforces the approval mechanism before collections are made available. -Default = empty list -| *`ee_from_hub_only`* | When deployed with {HubName} the installer pushes {ExecEnvShort} images to {HubName} and configures {ControllerName} to pull images from the {HubName} registry. +By default, when you upload collections to {HubName}, an administrator must approve them before they are made available to users. -To make {HubName} the only registry to pull {ExecEnvShort} images from, set this variable to `true`. +If you want to disable the content approval flow, set the variable to `false`. -If set to `false`, {ExecEnvShort} images are also taken directly from Red Hat. +Default = `true` -Default = `true` when the bundle installer is used. -| *`generate_automationhub_token`* a| If upgrading from {PlatformName} 2.0 or earlier, choose one of the following options: +| `automationhub_seed_collections` | `hub_seed_collections` | A Boolean that defines whether or not pre-loading of collections is enabled. When you run the bundle installer, validated content is uploaded to the `validated` repository, and certified content is uploaded to the `rh-certified` repository. By default, certified content and validated content are both uploaded. -* provide an existing {HubNameMain} token as `automationhub_api_token` +If you do not want to install content, set this variable to `false` to disable the seeding. -* set `generate_automationhub_token` to `true` to generate a new token. -Generating a new token will invalidate the existing token. +For the RPM-based installer, if you only want one type of content, set this variable to `true` and set the `automationhub_collection_seed_repository` variable to the type of content you want to include. -| *`nginx_hsts_max_age`* | This variable specifies how long, in seconds, the system should be considered as a _HTTP Strict Transport Security_ (HSTS) host. That is, how long HTTPS is used exclusively for communication. +Default = `true` -Default = 63072000 seconds, or two years. -| *`nginx_tls_protocols`* | Defines support for `ssl_protocols` in Nginx. +| `automationhub_ssl_cert` | `hub_tls_cert` | _Optional_ -Values available `TLSv1`, `TLSv1.1, `TLSv1.2`, `TLSv1.3` +`/path/to/automationhub.cert` -The TLSv1.1 and TLSv1.2 parameters only work when OpenSSL 1.0.1 or higher is used. +Same as `web_server_ssl_cert` but for {HubName} UI and API. -The TLSv1.3 parameter only works when OpenSSL 1.1.1 or higher is used. -If `nginx_tls-protocols = ['TLSv1.3']` only TLSv1.3 is enabled.
-To set more than one protocol use `nginx_tls_protocols = ['TLSv1.2', 'TLSv.1.3']` +| `automationhub_ssl_key` | `hub_tls_key` | _Optional_ -Default = `TLSv1.2`. -| *`pulp_db_fields_key`* | Relative or absolute path to the Fernet symmetric encryption key that you want to import. -The path is on the Ansible management node. -It is used to encrypt certain fields in the database, such as credentials. -If not specified, a new key will be generated. -| *`sso_automation_platform_login_theme`* | _Optional_ +`/path/to/automationhub.key` -Used for {PlatformNameShort} managed and externally managed {RHSSO}. +Same as `web_server_ssl_key` but for {HubName} UI and API. -Path to the directory where theme files are located. -If changing this variable, you must provide your own theme files. +| `automationhub_user_headers` | | List of NGINX headers for {HubNameMain}'s web server. -Default = `ansible-automation-platform`. -| *`sso_automation_platform_realm`* | _Optional_ +Each element in the list is provided to the web server's NGINX configuration as a separate line. -Used for {PlatformNameShort} managed and externally managed {RHSSO}. +Default = empty list -The name of the realm in SSO. +| `ee_from_hub_only` | | When deployed with {HubName}, the installation program pushes {ExecEnvShort} images to {HubName} and configures {ControllerName} to pull images from the {HubName} registry. -Default = `ansible-automation-platform`. -| *`sso_automation_platform_realm_displayname`* | _Optional_ +To make {HubName} the only registry to pull {ExecEnvShort} images from, set this variable to `true`. -Used for {PlatformNameShort} managed and externally managed {RHSSO}. +If set to `false`, {ExecEnvShort} images are also taken directly from Red Hat. -Display name for the realm. +Default = `true` when the bundle installer is used. -Default = `Ansible Automation Platform`. -//| *`sso_http_port`* or *`sso_https_port`* | IP or routable hostname for SSO. -// -//Default = `8080` for http, `8443` for https -| *`sso_console_admin_username`* | _Optional_ +| `generate_automationhub_token` | | When performing a fresh installation, a new token is automatically generated by default. If you want the installation program to generate a new token, set `generate_automationhub_token=true`, and the installation program uses the new token during the installation process. -Used for {PlatformNameShort} managed and externally managed {RHSSO}. +| `nginx_hsts_max_age` | `hub_nginx_hsts_max_age` | This variable specifies how long, in seconds, the system should be considered as an _HTTP Strict Transport Security_ (HSTS) host. That is, how long HTTPS is used exclusively for communication. -SSO administration username. +Default = `63072000` seconds, or two years. -Default = `admin`. -| *`sso_console_admin_password`* | _Required_ +| `pulp_db_fields_key` | | Relative or absolute path to the Fernet symmetric encryption key that you want to import. +The path is on the Ansible management node. It is used to encrypt certain fields in the database, such as credentials. +If not specified, a new key will be generated. + +| | `hub_azure_account_key` | Required when using an Azure blob storage backend. -Used for {PlatformNameShort} managed and externally managed {RHSSO}. +The Azure blob storage account key. -SSO administration password. -//| *`sso_console_keystore_file`* | Keystore file to install in SSO node. -// -//`/path/to/sso.jks` -| *`sso_custom_keystore_file`* | _Optional_ +| | `hub_azure_account_name` | Required when using an Azure blob storage backend.
-Used for {PlatformNameShort} managed {RHSSO} only. +The account name associated with the Azure blob storage. -Customer-provided keystore for SSO. -| *`sso_host`* | _Required_ +| | `hub_azure_container` | The name of the Azure blob storage container. -Used for {PlatformNameShort} externally managed {RHSSO} only. +Default = `pulp` -{HubNameStart} requires SSO and SSO administration credentials for -authentication. +| | `hub_azure_extra_settings` | Used to define extra parameters for the Azure blob storage backend. -If SSO is not provided in the inventory for configuration, then you must use this variable to define the SSO host. -| *`sso_keystore_file_remote`* | _Optional_ +For more information about the list of parameters, see link:https://django-storages.readthedocs.io/en/latest/backends/azure.html#settings[django-storages documentation - Azure Storage]. -Used for {PlatformNameShort} managed {RHSSO} only. +Default = `{}` + +| | `hub_tls_remote` | {HubNameStart} TLS remote files. -Set to `true` if the customer-provided keystore is on a remote node. +Default = `false` -Default = `false`. -| *`sso_keystore_name`* | _Optional_ +| | `hub_nginx_client_max_body_size` | NGINX maximum body size. -Used for {PlatformNameShort} managed {RHSSO} only. +Default = `20m` -Name of keystore for SSO. +| | `hub_nginx_http_port` | NGINX HTTP port. -Default = `ansible-automation-platform`. -| *`sso_keystore_password`* | Password for keystore for HTTPS enabled SSO. +Default = `8081` -Required when using {PlatformNameShort} managed SSO and when HTTPS is enabled. The default install deploys SSO with `sso_use_https=true`. -| *`sso_redirect_host`* | _Optional_ +| | `hub_nginx_https_port` | NGINX HTTPS port. -Used for {PlatformNameShort} managed and externally managed {RHSSO}. +Default = `8444` -If `sso_redirect_host` is set, it is used by the application to connect to SSO for authentication. +| | `hub_nginx_https_protocols` | NGINX HTTPS protocols. -This must be reachable from client machines. -| *`sso_ssl_validate_certs`* | _Optional_ +Default = `[TLSv1.2, TLSv1.3]` -Used for {PlatformNameShort} managed and externally managed {RHSSO}. +| | `hub_pg_socket` | PostgreSQL {HubName} UNIX socket. -Set to `true` if the certificate must be validated during connection. +| | `hub_secret_key` | The secret key value used by {HubName} to sign and encrypt data, ensuring secure communication and data integrity between services. -Default = `true`. +| | `hub_storage_backend` | {HubNameStart} storage backend type. -| *`sso_use_https`* | _Optional_ +Possible values include: `azure`, `file`, `s3`. -Used for {PlatformNameShort} managed and externally managed {RHSSO} if Single Sign On uses HTTPS. +Default = `file` -Default = `true`. -|==== +| | `hub_workers` | {HubNameStart} worker count. -For {HubNameMain} to connect to LDAP directly, you must configure the following variables: -A list of additional LDAP related variables that can be passed using the `ldap_extra_settings` variable, see the link:https://django-auth-ldap.readthedocs.io/en/latest/reference.html#settings[Django reference documentation]. -[cols="50%,50%",options="header"] -|==== -| *Variable* | *Description* -| *`automationhub_ldap_bind_dn`* | The name to use when binding to the LDAP server with `automationhub_ldap_bind_password`. +| | `hub_container_signing_pass` | Required when the container signing service is protected by a passphrase. + +The password for the automation content container signing service.
+ + +| | `hub_collection_signing_pass` | Required when the collection signing service is protected by a passphrase. + +The password for the automation content collection signing service. -Must be set when integrating {PrivateHubName} with LDAP, or the installation will fail. +| | `hub_postinstall` | Enable {HubNameStart} postinstall. -| *`automationhub_ldap_bind_password`* | _Required_ +Default = `false` -The password to use with `automationhub_ldap_bind_dn`. +| | `hub_postinstall_async_delay` | Postinstall delay between retries. -Must be set when integrating {PrivateHubName} LDAP, or the installation will fail. -| *`automationhub_ldap_group_search_base_dn`* | An LDAP Search object that finds all LDAP groups that users might belong to. +Default = `1` -If your configuration makes any references to LDAP groups, you must set this variable and `automationhub_ldap_group_type`. +| | `hub_postinstall_async_retries` | The number of postinstall retries to perform. -Must be set when integrating {PrivateHubName} with LDAP, or the installation will fail. -Default = `None` -| *`automationhub_ldap_group_search_filter`* | _Optional_ +Default = `30` -Search filter for finding group membership. +| | `hub_postinstall_dir` | {HubNameStart} postinstall directory. -Variable identifies what objectClass type to use for mapping groups with {HubName} and LDAP. -Used for installing {HubName} with LDAP. +| | `hub_postinstall_ignore_files` | {HubNameStart} ignore files. + +| | `hub_postinstall_repo_ref` | {HubNameStart} repository branch or tag. -Default = `(objectClass=Group)` -| *`automationhub_ldap_group_search_scope`* | _Optional_ +Default = `main` -Scope to search for groups in an LDAP tree using the django framework for LDAP authentication. -Used for installing {HubName} with LDAP. +| | `hub_postinstall_repo_url` | {HubNameStart} repository URL. + +| | `hub_shared_data_path` | Required when installing more than one instance of {HubName} with a `file` storage backend. When installing a single instance of {HubName}, it is optional. -Default = `SUBTREE` -| *`automationhub_ldap_group_type`* | +Path to the Network File System (NFS) share with read, write, and execute (RWX) access. -Describes the type of group returned by *automationhub_ldap_group_search*. +| | `hub_shared_data_mount_opts` | Mount options for the Network File System (NFS) share. -This is set dynamically based on the the values of *automationhub_ldap_group_type_params* and *automationhub_ldap_group_type_class*, otherwise it is the default value coming from django-ldap which is 'None' +Default = `rw,sync,hard` -Default = `django_auth_ldap.config:GroupOfNamesType` -| *`automationhub_ldap_group_type_class`* | _Optional_ +| | `hub_s3_access_key` | Required when using an AWS S3 storage backend. -The importable path for the django-ldap group type class. +The AWS S3 access key. -Variable identifies the group type used during group searches within the django framework for LDAP authentication. -Used for installing {HubName} with LDAP. +| | `hub_s3_secret_key` | Required when using an AWS S3 storage backend. -Default =`django_auth_ldap.config:GroupOfNamesType` -//Removed as it seems not to be an inventory file variable, but is used in ldapextras.yml -//| *`automationhub_ldap_group_type_params`* | -// -//Default = "name_attr": "cn" -| *`automationhub_ldap_server_uri`* | The URI of the LDAP server. +The AWS S3 secret key. -Use any URI that is supported by your underlying LDAP libraries. +| | `hub_s3_bucket_name` | The name of the AWS S3 storage bucket.
-Must be set when integrating {PrivateHubName} LDAP, or the installation will fail. -| *`automationhub_ldap_user_search_base_dn`* | An LDAP Search object that locates a user in the directory. -The filter parameter must contain the placeholder %(user)s for the username. -It must return exactly one result for authentication to succeed. +Default = `pulp` -Must be set when integrating {PrivateHubName} with LDAP, or the installation will fail. -| *`automationhub_ldap_user_search_filter`* | _Optional_ +| | `hub_s3_extra_settings` | Used to define extra parameters for the AWS S3 storage backend. -Default = `'(uid=%(user)s)'` -| *`automationhub_ldap_user_search_scope`* | _Optional_ +For more information about the list of parameters, see link:https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings[django-storages documentation - Amazon S3]. -Scope to search for users in an LDAP tree by using the django framework for LDAP authentication. -Used for installing {HubName} with LDAP. +Default = `{}` -Default = `SUBTREE` |==== diff --git a/downstream/modules/platform/ref-images-inventory-variables.adoc b/downstream/modules/platform/ref-images-inventory-variables.adoc new file mode 100644 index 0000000000..ef948e4f36 --- /dev/null +++ b/downstream/modules/platform/ref-images-inventory-variables.adoc @@ -0,0 +1,65 @@ +[id="ref-images-inventory-variables"] + += Image variables + +[cols="50%,50%,50%",options="header"] +|==== +| *RPM variable name* | *Container variable name* | *Description* +| | `controller_image` | {ControllerNameStart} image. + +Default = `controller-rhel8:latest` + +| | `de_extra_images` | Decision environment extra images. +| | `de_supported_image` | Decision environment supported image. + +Default = `de-supported-rhel8:latest` + +| | `eda_image` | {EDAName} image. + +Default = `eda-controller-rhel8:latest` + +| | `eda_web_image` | {EDAName} web image. + +Default = `eda-controller-ui-rhel8:latest` +| | `ee_29_enabled` | Enable {ExecEnvShort} 29. + +Default = `false` + +| | `ee_29_image` | Execution environment 29 image. + +Default = `ee-29-rhel8:latest` + +| | `ee_extra_images` | Execution environment extra images. +| | `ee_minimal_image` | Execution environment minimal image. + +Default = `ee-minimal-rhel8:latest` + +| | `ee_supported_image` | Execution environment supported image. + +Default = `ee-supported-rhel8:latest` + +| | `hub_image` | {HubNameStart} image. + +Default = `hub-rhel8:latest` + +| | `hub_web_image` | {HubNameStart} web image. + +Default = `hub-web-rhel8:latest` + +| | `postgresql_image` | PostgreSQL image. + +Default = `postgresql-15:latest` + +| | `receptor_image` | Receptor image. + +Default = `receptor-rhel8:latest` + +| | `redis_image` | Redis image. + +Default = `redis-6:latest` + +| | `pcp_image` | Performance Co-Pilot image. 
+ +Default = `rhel8-pcp:latest` + +|==== \ No newline at end of file diff --git a/downstream/modules/platform/ref-operator-crs.adoc b/downstream/modules/platform/ref-operator-crs.adoc new file mode 100644 index 0000000000..466af9fee0 --- /dev/null +++ b/downstream/modules/platform/ref-operator-crs.adoc @@ -0,0 +1,736 @@ +[id="operator-crs"] + += Custom resources + +== aap-existing-controller-and-hub-new-eda.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + controller: + name: existing-controller + disabled: false + + eda: + disabled: false + + hub: + name: existing-hub + disabled: false +---- + +== aap-all-defaults.yml + +[subs="+attributes"] +---- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + # Platform + ## uncomment to test bundle certs + # bundle_cacert_secret: gateway-custom-certs + + # Components + + hub: + disabled: false + ## uncomment if using file storage for Content pod + storage_type: file + file_storage_storage_class: nfs-local-rwx + file_storage_size: 10Gi + + ## uncomment if using S3 storage for Content pod + # storage_type: S3 + # object_storage_s3_secret: example-galaxy-object-storage + + ## uncomment if using Azure storage for Content pod + # storage_type: azure + # object_storage_azure_secret: azure-secret-name + + # lightspeed: + # disabled: true + +# End state: +# * {ControllerNameStart} deployed and named: myaap-controller +# * * {EDAName} deployed and named: myaap-eda +# * * {HubNameStart} deployed and named: myaap-hub +---- + +== aap-existing-controller-only.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + controller: + name: existing-controller + + eda: + disabled: true + + hub: + disabled: true + ## uncomment if using file storage for Content pod + # storage_type: file + # file_storage_storage_class: nfs-local-rwx + # file_storage_size: 10Gi + + ## uncomment if using S3 storage for Content pod + # storage_type: S3 + # object_storage_s3_secret: example-galaxy-object-storage + + ## uncomment if using Azure storage for Content pod + # storage_type: azure + # object_storage_azure_secret: azure-secret-name + + +# End state: +# * {ControllerNameStart}: existing-controller registered with {PlatformNameShort} UI +# * * {EDAName} disabled +# * * {HubNameStart} disabled +---- + +== aap-existing-hub-and-controller.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + controller: + name: existing-controller + disabled: false + + eda: + disabled: true + + hub: + name: existing-hub + disabled: false + +# End state: +# * {ControllerNameStart}: existing-controller registered with {PlatformNameShort} UI +# * * {EDAName} disabled +# * * {HubNameStart}: existing-hub registered with {PlatformNameShort} UI +---- + +== aap-existing-hub-controller-eda.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + controller: + name: existing-controller # <-- this is the name of
the existing AutomationController CR + disabled: false + + eda: + name: existing-eda + disabled: false + + hub: + name: existing-hub + disabled: false + +# End state: +# * {ControllerNameStart}: existing-controller registered with {PlatformNameShort} UI +# * * {EDAName}: existing-eda registered with {PlatformNameShort} UI +# * * {HubNameStart}: existing-hub registered with {PlatformNameShort} UI +# +# Note: The {ControllerName}, {EDAName}, and {HubName} names must match the names of the existing +# {ControllerNameStart}, {EDAName}, and {HubName} CRs in the same namespace as the {PlatformNameShort} CR. If the names do not match, the {PlatformNameShort} CR will not be able to register the existing {ControllerName}, {EDAName}, and {HubName} with the {PlatformNameShort} UI, and will instead deploy new {ControllerName}, {EDAName}, and {HubName} instances.
+---- + +== aap-fresh-controller-eda.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + controller: + disabled: false + + eda: + disabled: false + + hub: + disabled: true + ## uncomment if using file storage for Content pod + storage_type: file + file_storage_storage_class: nfs-local-rwx + file_storage_size: 10Gi + + ## uncomment if using S3 storage for Content pod + # storage_type: S3 + # object_storage_s3_secret: example-galaxy-object-storage + + ## uncomment if using Azure storage for Content pod + # storage_type: azure + # object_storage_azure_secret: azure-secret-name + +# End state: +# * {ControllerNameStart} deployed and named: myaap-controller +# * * {EDAName} deployed and named: myaap-eda +# * * {HubNameStart} disabled +# * {LightspeedShortName} disabled +---- + +== aap-fresh-external-db.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + controller: + disabled: false + + eda: + disabled: false + + hub: + disabled: false + ## uncomment if using file storage for Content pod + storage_type: file + file_storage_storage_class: nfs-local-rwx + file_storage_size: 10Gi + + ## uncomment if using S3 storage for Content pod + # storage_type: S3 + # object_storage_s3_secret: example-galaxy-object-storage + + ## uncomment if using Azure storage for Content pod + # storage_type: azure + # object_storage_azure_secret: azure-secret-name + + +# End state: +# * {ControllerNameStart} deployed and named: myaap-controller +# * * {EDAName} deployed and named: myaap-eda +# * * {HubNameStart} deployed and named: myaap-hub +---- + +== aap-configuring-external-db-all-default-components.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + database: + database_secret: external-postgres-configuration-gateway + controller: + postgres_configuration_secret: external-postgres-configuration-controller + hub: + postgres_configuration_secret: external-postgres-configuration-hub + eda: + database: + database_secret: external-postgres-configuration-eda +---- + +== aap-configuring-existing-external-db-all-default-components.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + database: + database_secret: external-postgres-configuration-gateway +---- + +[NOTE] +==== +The system uses the external database for the {Gateway}, and {ControllerName}, {HubName}, and {EDAName} continue to use the existing databases that were used in 2.4.
+==== + +== aap-configuring-external-db-with-lightspeed-enabled.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + database: + database_secret: external-postgres-configuration-gateway + controller: + postgres_configuration_secret: external-postgres-configuration-controller + hub: + postgres_configuration_secret: external-postgres-configuration-hub + eda: + database: + database_secret: external-postgres-configuration-eda + lightspeed: + disabled: false + database: + database_secret: -postgres-configuration + auth_config_secret_name: 'auth-configuration-secret' + model_config_secret_name: 'model-configuration-secret' +---- + +[NOTE] +==== +You can follow the link:{BaseURL}/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant/2.x_latest/html/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant_user_guide/index[Red Hat Ansible Lightspeed with IBM watsonx Code Assistant User Guide] for help with creating the model and auth secrets. +==== + +== aap-fresh-install-local-management.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + # Platform + ## uncomment to test bundle certs + # bundle_cacert_secret: gateway-custom-certs + + # Components + controller: + disabled: false + extra_settings: + - setting: ALLOW_LOCAL_RESOURCE_MANAGEMENT + value: 'True' + + eda: + disabled: false + + extra_settings: + - setting: EDA_ALLOW_LOCAL_RESOURCE_MANAGEMENT + value: '@bool True' + + hub: + disabled: false + ## uncomment if using file storage for Content pod + storage_type: file + file_storage_storage_class: nfs-local-rwx + file_storage_size: 10Gi + + + pulp_settings: + ALLOW_LOCAL_RESOURCE_MANAGEMENT: True + + # cache_enabled: false + # redirect_to_object_storage: "False" + # analytics: false + # galaxy_collection_signing_service: "" + # galaxy_container_signing_service: "" + # token_auth_disabled: 'False' + # token_signature_algorithm: 'ES256' + + ## uncomment if using S3 storage for Content pod + # storage_type: S3 + # object_storage_s3_secret: example-galaxy-object-storage + + ## uncomment if using Azure storage for Content pod + # storage_type: azure + # object_storage_azure_secret: azure-secret-name + + # lightspeed: + # disabled: true + +# End state: +# * {ControllerNameStart} deployed and named: myaap-controller +# * * {EDAName} deployed and named: myaap-eda +# * * {HubNameStart} deployed and named: myaap-hub +---- + +== aap-fresh-install-with-settings.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + image_pull_policy: Always + + # Platform + ## uncomment to test bundle certs + # bundle_cacert_secret: gateway-custom-certs + + # Components + controller: + disabled: false + image_pull_policy: Always + + extra_settings: + - setting: MAX_PAGE_SIZE + value: '501' + + eda: + disabled: false + image_pull_policy: Always + + extra_settings: + - setting: EDA_MAX_PAGE_SIZE + value: '501' + + hub: + disabled: false + image_pull_policy: Always + + ## uncomment if using file storage for Content pod + storage_type: file + file_storage_storage_class: rook-cephfs + file_storage_size: 10Gi + + ## uncomment if using S3 storage for Content pod + # storage_type: S3 + # object_storage_s3_secret:
example-galaxy-object-storage + + ## uncomment if using Azure storage for Content pod + # storage_type: azure + # object_storage_azure_secret: azure-secret-name + + pulp_settings: + MAX_PAGE_SIZE: 501 + cache_enabled: false + + # lightspeed: + # disabled: true + +# End state: +# * {ControllerNameStart} deployed and named: myaap-controller +# * * {EDAName} deployed and named: myaap-eda +# * * {HubNameStart} deployed and named: myaap-hub +---- + +== aap-fresh-install.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + # Redis Mode + # redis_mode: cluster + + # Platform + ## uncomment to test bundle certs + # bundle_cacert_secret: gateway-custom-certs + # extra_settings: + # - setting: MAX_PAGE_SIZE + # value: '501' + + # Components + controller: + disabled: false + + eda: + disabled: false + + hub: + disabled: false + ## uncomment if using file storage for Content pod + storage_type: file + file_storage_storage_class: nfs-local-rwx + file_storage_size: 10Gi + + ## uncomment if using S3 storage for Content pod + # storage_type: S3 + # object_storage_s3_secret: example-galaxy-object-storage + + ## uncomment if using Azure storage for Content pod + # storage_type: azure + # object_storage_azure_secret: azure-secret-name + + # lightspeed: + # disabled: true + +# End state: +# * {ControllerNameStart} deployed and named: myaap-controller +# * * {EDAName} deployed and named: myaap-eda +# * * {HubNameStart} deployed and named: myaap-hub +---- + +== aap-fresh-only-controller.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + controller: + disabled: false + + eda: + disabled: true + + hub: + disabled: true + ## uncomment if using file storage for Content pod + # storage_type: file + # file_storage_storage_class: nfs-local-rwx + # file_storage_size: 10Gi + + ## uncomment if using S3 storage for Content pod + # storage_type: S3 + # object_storage_s3_secret: example-galaxy-object-storage + + ## uncomment if using Azure storage for Content pod + # storage_type: azure + # object_storage_azure_secret: azure-secret-name + + +# End state: +# * {ControllerNameStart} deployed and named: myaap-controller +# * * {EDAName} disabled +# * * {HubNameStart} disabled +---- + +== aap-fresh-only-hub.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + controller: + disabled: true + + eda: + disabled: true + + hub: + disabled: false + ## uncomment if using file storage for Content pod + storage_type: file + file_storage_storage_class: nfs-local-rwx + file_storage_size: 10Gi + + # # AaaS Hub Settings + # pulp_settings: + # cache_enabled: false + + ## uncomment if using S3 storage for Content pod + # storage_type: S3 + # object_storage_s3_secret: example-galaxy-object-storage + + ## uncomment if using Azure storage for Content pod + # storage_type: azure + # object_storage_azure_secret: azure-secret-name + + lightspeed: + disabled: true + +# End state: +# * {ControllerNameStart} disabled +# * * {EDAName} disabled +# * * {HubNameStart} deployed and named: myaap-hub +# * {LightspeedShortName} disabled +---- + +== aap-lightspeed-enabled.yml + 
+[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + controller: + disabled: false + + eda: + disabled: false + + hub: + disabled: false + ## uncomment if using file storage for Content pod + storage_type: file + file_storage_storage_class: nfs-local-rwx + file_storage_size: 10Gi + + ## uncomment if using S3 storage for Content pod + # storage_type: S3 + # object_storage_s3_secret: example-galaxy-object-storage + + ## uncomment if using Azure storage for Content pod + # storage_type: azure + # object_storage_azure_secret: azure-secret-name + + lightspeed: + disabled: false + +# End state: +# * {ControllerNameStart} deployed and named: myaap-controller +# * * {EDAName} deployed and named: myaap-eda +# * * {HubNameStart} deployed and named: myaap-hub +# * {LightspeedShortName} deployed and named: myaap-lightspeed +---- + +== gateway-only.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + # Development purposes only + no_log: false + + controller: + disabled: true + + eda: + disabled: true + + hub: + disabled: true + + lightspeed: + disabled: true + +# End state: +# * {GatewayStart} deployed and named: myaap-gateway +# * UI is reachable at: https://myaap-gateway-gateway.apps.ocp4.example.com +# * {ControllerNameStart} is not deployed +# * * {EDAName} is not deployed +# * * {HubNameStart} is not deployed +# * {LightspeedShortName} is not deployed +---- + +== eda-max-running-activations.yml + +[subs="+attributes"] +---- +--- +apiVersion: aap.ansible.com/v1alpha1 +kind: AnsibleAutomationPlatform +metadata: + name: myaap +spec: + eda: + extra_settings: + - setting: EDA_MAX_RUNNING_ACTIVATIONS + value: "15" # Setting this value to "-1" means there will be no limit + +---- diff --git a/downstream/modules/platform/ref-operator-mesh-prerequisites.adoc b/downstream/modules/platform/ref-operator-mesh-prerequisites.adoc index 5a3d07c151..8d8e0315de 100644 --- a/downstream/modules/platform/ref-operator-mesh-prerequisites.adoc +++ b/downstream/modules/platform/ref-operator-mesh-prerequisites.adoc @@ -2,7 +2,7 @@ = Prerequisites -The automation mesh is dependent on hop and execution nodes running on {RHEL} (RHEL). +The automation mesh is dependent on hop and execution nodes running on _{RHEL}_ (RHEL). Your {PlatformName} subscription grants you ten {RHEL} licenses that can be used for running components of {PlatformNameShort}. For more information about {RHEL} subscriptions, see link:{BaseURL}/red_hat_enterprise_linux/9/html/configuring_basic_system_settings/assembly_registering-the-system-and-managing-subscriptions_configuring-basic-system-settings[Registering the system and managing subscriptions] in the {RHEL} documentation. @@ -10,9 +10,9 @@ For more information about {RHEL} subscriptions, see link:{BaseURL}/red_hat_ente The following steps prepare the RHEL instances for deployment of the automation mesh. . You require a {RHEL} operating system. -Each node in the mesh requires a static IP address, or a resolvable DNS hostname that {ControllerName} can access. +Each node in the mesh requires a static IP address, or a resolvable DNS hostname that {PlatformNameShort} can access. . Ensure that you have the minimum requirements for the RHEL virtual machine before proceeding. 
-For more information, see the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/platform-system-requirements[Red Hat Ansible Automation Platform system requirements].
+For more information, see the link:{URLPlanningGuide}/platform-system-requirements[System requirements].
. Deploy the RHEL instances within the remote networks where communication is required.
For information about creating virtual machines, see link:{BaseURL}/red_hat_enterprise_linux/9/html/configuring_and_managing_virtualization/assembly_creating-virtual-machines_configuring-and-managing-virtualization[Creating Virtual Machines] in the _{RHEL}_ documentation.
Remember to scale the capacity of your virtual machines sufficiently so that your proposed tasks can run on them.
diff --git a/downstream/modules/platform/ref-operator-ocp-version.adoc b/downstream/modules/platform/ref-operator-ocp-version.adoc
index 3316628567..a26aa391c4 100644
--- a/downstream/modules/platform/ref-operator-ocp-version.adoc
+++ b/downstream/modules/platform/ref-operator-ocp-version.adoc
@@ -4,7 +4,7 @@

[role="_abstract"]

-The {OperatorPlatform} to install {PlatformNameShort} {PlatformVers} is available on {OCPShort} 4.9 and later versions.
+The {OperatorPlatformNameShort} to install {PlatformNameShort} {PlatformVers} is available on {OCPShort} versions 4.12 through 4.17.

[role="_additional-resources"]
.Additional resources
diff --git a/downstream/modules/platform/ref-postgresql-requirements.adoc b/downstream/modules/platform/ref-postgresql-requirements.adoc
index d199308bbc..d3b2429ff2 100644
--- a/downstream/modules/platform/ref-postgresql-requirements.adoc
+++ b/downstream/modules/platform/ref-postgresql-requirements.adoc
@@ -2,15 +2,10 @@

= PostgreSQL requirements

-{PlatformName} uses PostgreSQL 13. PostgreSQL user passwords are hashed with SCRAM-SHA-256 secure hashing algorithm before storing in the database.
+{PlatformName} {PlatformVers} uses {PostgresVers} and requires external (customer-supported) databases to have ICU support. PostgreSQL user passwords are hashed with the SCRAM-SHA-256 secure hashing algorithm before they are stored in the database.

-To determine if your {ControllerName} instance has access to the database, you can do so with the command, `awx-manage check_db` command.
+To determine whether your {ControllerName} instance has access to the database, run the `awx-manage check_db` command.

-.Database
-
-[cols="a,a,a",options="header"]
-|===
-h| Service |Required |Notes
// [ddacosta - removed based on AAP-15617]| *Each {ControllerName}* | 40 GB dedicated hard disk space |
//* Dedicate a minimum of 20 GB to `/var/` for file and working directory storage.
@@ -21,29 +16,21 @@ h| Service |Required |Notes
// | *Each {HubName}* | 60 GB dedicated hard disk space |
//Storage volume must be rated for a minimum baseline of 1500 IOPS.
-| *Database* |
-* 20 GB dedicated hard disk space
-* 4 CPUs
-* 16 GB RAM |
+[NOTE]
+====
+* {ControllerNameStart} data is stored in the database.
+Database storage increases with the number of hosts managed, number of jobs run, number of facts stored in the fact cache, and number of tasks in any individual job.
+For example, a playbook that runs every hour (24 times a day) across 250 hosts, with 20 tasks, stores over 800,000 events in the database every week.

-* 150 GB+ recommended
-* Storage volume must be rated for a high baseline IOPS (1500 or more).
-* All {ControllerName} data is stored in the database.
-Database storage increases with the number of hosts managed, number of jobs run, number of facts stored in the fact cache, and number of tasks in any individual job.
-For example, a playbook run every hour (24 times a day) across 250 hosts, with 20 tasks, will store over 800000 events in the database every week.
-* If not enough space is reserved in the database, the old job runs and facts must be cleaned on a regular basis. For more information, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#assembly-controller-management-jobs[Management Jobs] in the _Automation Controller Administration Guide_.
-|===
+* If not enough space is reserved in the database, old job runs and facts must be cleaned up on a regular basis. For more information, see link:{URLControllerAdminGuide}/assembly-controller-management-jobs[Management Jobs] in the _{TitleControllerAdminGuide}_ guide.
+====

.PostgreSQL Configurations

-Optionally, you can configure the PostgreSQL database as separate nodes that are not managed by the {PlatformName} installer. When the {PlatformNameShort} installer manages the database server, it configures the server with defaults that are generally recommended for most workloads. For more information about the settings you can use to improve database performance, see link:https://docs.ansible.com/automation-controller/latest/html/administration/performance.html#database-settings[Database Settings].
-//-----
-//max_connections == 1024
-//shared_buffers == ansible_memtotal_mb*0.3
-//work_mem == ansible_memtotal_mb*0.03
-//maintenance_work_mem == ansible_memtotal_mb*0.04
-//-----
+Optionally, you can configure the PostgreSQL database as separate nodes that are not managed by the {PlatformName} installer.
+When the {PlatformNameShort} installer manages the database server, it configures the server with defaults that are generally recommended for most workloads.
+For more information about the settings you can use to improve database performance, see link:{URLControllerAdminGuide}/assembly-controller-improving-performance#ref-controller-database-settings[PostgreSQL database configuration and maintenance for automation controller] in the _{TitleControllerAdminGuide}_ guide.

[role="_additional-resources"]
.Additional resources
diff --git a/downstream/modules/platform/ref-projects-collections-support.adoc b/downstream/modules/platform/ref-projects-collections-support.adoc
index a8f7694cd5..551ab9840a 100644
--- a/downstream/modules/platform/ref-projects-collections-support.adoc
+++ b/downstream/modules/platform/ref-projects-collections-support.adoc
@@ -13,7 +13,7 @@
toggle button to *Off*.

-Roles and collections are locally cached for performance reasons, and you select *Update Revision on Launch* in the project *Options* to ensure this:
+Roles and collections are locally cached for performance reasons, and you select *Update Revision on Launch* in the project *Options* to ensure this.

-image:projects-scm-update-options-update-on-launch-checked.png[update-on-launch]
+//image:projects-scm-update-options-update-on-launch-checked.png[update-on-launch]

[NOTE]
====
diff --git a/downstream/modules/platform/ref-receptor-inventory-variables.adoc b/downstream/modules/platform/ref-receptor-inventory-variables.adoc
new file mode 100644
index 0000000000..5dc42ee716
--- /dev/null
+++ b/downstream/modules/platform/ref-receptor-inventory-variables.adoc
@@ -0,0 +1,80 @@
+
+[id="ref-receptor-inventory-variables"]
+
+= Receptor variables
+
+[cols="50%,50%,50%",options="header"]
+|====
+| *RPM variable name* | *Container variable name* | *Description*
+
+| | `receptor_disable_signing` | Disable receptor signing.
+
+Default = `false`
+
+| | `receptor_disable_tls` | Disable receptor TLS.
+
+Default = `false`
+
+| | `receptor_log_level` | Receptor logging level.
+
+Default = `info`
+
+| | `receptor_mintls13` | Require TLS 1.3 as the minimum TLS version for receptor.
+
+Default = `false`
+
+| See `peers` for the RPM equivalent variable. | `receptor_peers` |
+
+Used to indicate which nodes a specific host connects to. Wherever this variable is defined, an outbound connection to the specific host is established.
+
+This variable can be a comma-separated list of hosts only and not groups from the inventory. This is resolved into a set of hosts that is used to construct the `receptor.conf` file.
+
+For example usage, see link:{URLContainerizedInstall}/aap-containerized-installation#adding-execution-nodes_aap-containerized-installation[Adding execution nodes].
+
+Default = []
+
+| `receptor_datadir` | | This variable configures the receptor data directory. By default, it is set to `/tmp/receptor`. To change the default location, run the installation script with `"-e receptor_datadir="` and specify the target directory that you want.
+
+*NOTES*
+
+* The target directory must be accessible to *awx* users.
+
+* If the target directory is a temporary file system *tmpfs*, ensure it is remounted correctly after a reboot. Failure to do so results in the receptor no longer having a working directory.
+
+| `receptor_listener_port` | `receptor_port` | Receptor port number.
+
+Default = `27199`
+
+| `receptor_listener_protocol` | `receptor_protocol` | Receptor protocol.
+
+Default = `tcp`
+
+| | `receptor_signing_private_key` | Receptor signing private key.
+| | `receptor_signing_public_key` | Receptor signing public key.
+| | `receptor_signing_remote` | Receptor signing remote files.
+
+Default = `false`
+
+| | `receptor_tls_cert` | Path to the SSL/TLS certificate file for receptor.
+| | `receptor_tls_key` | Path to the SSL/TLS key file for receptor.
+| | `receptor_tls_remote` | Receptor TLS remote files.
+
+Default = `false`
+
+| See `node_type` for the RPM equivalent variable. | `receptor_type` a|
+
+For the `[automationcontroller]` group the two options are:
+
+* `receptor_type=control` - The node only runs project and inventory updates, but not regular jobs.
+
+* `receptor_type=hybrid` - The node runs everything.
+
+Default for this group = `hybrid`.
+
+For the `[execution_nodes]` group the two options are:
+
+* `receptor_type=hop` - The node forwards jobs to an execution node.
+* `receptor_type=execution` - The node can run jobs.
+
+Default for this group = `execution`
+|====
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-redis-config-enterprise-topology.adoc b/downstream/modules/platform/ref-redis-config-enterprise-topology.adoc
new file mode 100644
index 0000000000..99f0b221f8
--- /dev/null
+++ b/downstream/modules/platform/ref-redis-config-enterprise-topology.adoc
@@ -0,0 +1,31 @@
+:_newdoc-version: 2.18.3
+:_template-generated: 2024-10-11
+
+:_mod-docs-content-type: REFERENCE
+
+[id="redis-config-enterprise-topology_{context}"]
+= Configuring Redis
+
+{PlatformNameShort} offers a centralized Redis instance in both `standalone` and `clustered` topologies.
+
+In RPM deployments, the Redis mode is set to `cluster` by default. You can change this setting in the `[all:vars]` section of the inventory file, as shown in the following example:
+
+[source,ini]
+----
+[all:vars]
+admin_password=''
+pg_host='data.example.com'
+pg_port='5432'
+pg_database='awx'
+pg_username='awx'
+pg_password=''
+pg_sslmode='prefer' # set to 'verify-full' for client-side enforced SSL
+
+registry_url='registry.redhat.io'
+registry_username=''
+registry_password=''
+
+redis_mode=cluster
+----
+
+For more information about Redis, see link:{URLPlanningGuide}/ha-redis_planning[Caching and queueing system] in _{TitlePlanningGuide}_.
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-removing-instances.adoc b/downstream/modules/platform/ref-removing-instances.adoc
index 2a6b2dab2b..886d5badd3 100644
--- a/downstream/modules/platform/ref-removing-instances.adoc
+++ b/downstream/modules/platform/ref-removing-instances.adoc
@@ -2,7 +2,7 @@

= Removing Instances

-From the *Add instance* page, you can add, remove or run health checks on your nodes.
+From the *Instances* page, you can add, remove, or run health checks on your nodes.

[NOTE]
====
diff --git a/downstream/modules/platform/ref-single-controller-hub-eda-with-managed-db.adoc b/downstream/modules/platform/ref-single-controller-hub-eda-with-managed-db.adoc
index e3e1e43f87..6605d6ddf6 100644
--- a/downstream/modules/platform/ref-single-controller-hub-eda-with-managed-db.adoc
+++ b/downstream/modules/platform/ref-single-controller-hub-eda-with-managed-db.adoc
@@ -15,6 +15,7 @@ Use this example to populate the inventory file to deploy single instances of {C

====

+[literal, subs="+attributes"]
-----
[automationcontroller]
controller.example.com
@@ -64,12 +65,6 @@ automationedacontroller_pg_database='automationedacontroller'
automationedacontroller_pg_username='automationedacontroller'
automationedacontroller_pg_password=''

-# Keystore file to install in SSO node
-# sso_custom_keystore_file='/path/to/sso.jks'
-
-# This install will deploy SSO with sso_use_https=True
-# Keystore password is required for https enabled SSO
-sso_keystore_password=''

# This install will deploy a TLS enabled Automation Hub.
# If for some reason this is not the behavior wanted one can
diff --git a/downstream/modules/platform/ref-system-requirements.adoc b/downstream/modules/platform/ref-system-requirements.adoc
index cd12a0f28d..c8cf731c03 100644
--- a/downstream/modules/platform/ref-system-requirements.adoc
+++ b/downstream/modules/platform/ref-system-requirements.adoc
@@ -4,7 +4,10 @@

= {PlatformName} system requirements

-Your system must meet the following minimum system requirements to install and run {PlatformName}.
+Your system must meet the following minimum system requirements to install and run {PlatformName}.
+A resilient deployment requires 10 virtual machines with a minimum of 16 gigabytes (GB) of RAM and 4 virtual CPUs (vCPU).
+See link:{LinkTopologies} for more information on topology options.
+

.Base system

@@ -14,40 +17,59 @@ Your system must meet the following minimum system requirements to install and r
h| Subscription | Valid {PlatformName} |

-h| OS | {RHEL} 8.6 or later 64-bit (x86, ppc64le, s390x, aarch64) |{PlatformName} is also supported on OpenShift, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/deploying_the_red_hat_ansible_automation_platform_operator_on_openshift_container_platform/index[Deploying the Red Hat Ansible Automation Platform operator on OpenShift Container Platform] for more information.
+h| OS | {RHEL} 8.8 or later (x86_64, aarch64), or {RHEL} 9.2 or later (x86_64, aarch64) |{PlatformName} is also supported on OpenShift. See link:{LinkOperatorInstallation} for more information.
+
+h| Ansible-core | Ansible-core version {CoreInstVers} or later | {PlatformNameShort} uses the system-wide ansible-core package to install the platform, but uses ansible-core {CoreUseVers} for both its control plane and built-in execution environments.

-h| Ansible-core | Ansible-core version {CoreInstVers} or later | {PlatformNameShort} includes execution environments that contain ansible-core {CoreUseVers}.
+h| Database | {PostgresVers} | {PlatformName} {PlatformVers} requires external (customer-supported) databases to have ICU support.

-h| Python | 3.9 or later |
+|===

-h| Browser | A currently supported version of Mozilla FireFox or Google Chrome |
+.Virtual machine requirements

-h| Database | PostgreSQL version 13 |
+[cols="a,a,a,a", options="header"]
+|===
+| Component | RAM | vCPU | Storage
+
+| {GatewayStart} | 16 GB | 4 | 20 GB minimum
+| Control nodes | 16 GB | 4 | 80 GB minimum with at least 20 GB available under `/var/lib/awx`
+| Execution nodes | 16 GB | 4 | 40 GB minimum
+| Hop nodes | 16 GB | 4 | 40 GB minimum
+| {HubNameStart} | 16 GB | 4 | 40 GB minimum allocated to `/var/lib/pulp`
+| Database | 16 GB | 4 | 100 GB minimum allocated to `/var/lib/pgsql`
+| {EDAcontroller} | 16 GB | 4 | 40 GB minimum
|===

-The following are necessary for you to work with project updates and collections:
+[NOTE]
+====
+These are minimum requirements. For larger workloads, you can increase them in increments of 2x (for example, 16 GB becomes 32 GB and 4 vCPU becomes 8 vCPU). See the horizontal scaling guide for more information.
+====
+
+.Repository requirements
+
+Enable the following repositories only when installing {PlatformName}:
+
+* RHEL BaseOS

-* Ensure that the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/ref-network-ports-protocols_planning[network ports and protocols] listed in _Table 5.9. Automation Hub_ are available for successful connection and download of collections from {HubName} or {Galaxy} server.
-* Disable SSL inspection either when using self-signed certificates or for the Red Hat domains.
+* RHEL AppStream

[NOTE]
====
-The requirements for systems managed by {PlatformNameShort} are the same as for Ansible.
-See link:https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#prerequisites[Installing Ansible] in the Ansible Community Documentation.
+If you enable repositories other than those listed above, the {PlatformName} installation could fail unexpectedly.
====

-.Additional notes for {PlatformName} requirements
+The following are necessary for you to work with project updates and collections:

-* {PlatformName} depends on Ansible Playbooks and requires the installation of the latest stable version of ansible-core. You can download ansible-core manually or download it automatically as part of your installation of {PlatformName}.
+* Ensure that the link:{URLPlanningGuide}/ref-network-ports-protocols_planning#ref-network-ports-protocols_planning[Network ports and protocols] listed in _Table 6.3. Automation Hub_ are available for successful connection and download of collections from {HubName} or {Galaxy} server.

-* For new installations, {ControllerName} installs the latest release package of ansible-core.
+.Additional notes for {PlatformName} requirements

* If performing a bundled {PlatformNameShort} installation, the installation setup.sh script attempts to install ansible-core (and its dependencies) from the bundle for you.

-* If you have installed Ansible manually, the {PlatformNameShort} installation setup.sh script detects that Ansible has been installed and does not attempt to reinstall it.
+* If you have installed ansible-core manually, the {PlatformNameShort} installation setup.sh script detects that ansible-core has been installed and does not attempt to reinstall it.

[NOTE]
====
-You must install Ansible using a package manager such as `dnf`, and the latest stable version of the package manager must be installed for {PlatformName} to work properly.
-Ansible version 2.14 is required for versions {PlatformVers} and later.
+You must use ansible-core, which is installed by using the `dnf` package manager.
+Ansible-core version {CoreInstVers} is required for {PlatformNameShort} {PlatformVers} and later.
====
diff --git a/downstream/modules/platform/ref-using-custom-receptor-signing-keys.adoc b/downstream/modules/platform/ref-using-custom-receptor-signing-keys.adoc
index 1b83fff58f..fee4f5e6a1 100644
--- a/downstream/modules/platform/ref-using-custom-receptor-signing-keys.adoc
+++ b/downstream/modules/platform/ref-using-custom-receptor-signing-keys.adoc
@@ -7,9 +7,9 @@

= Using custom Receptor signing keys

[role="_abstract"]
-Receptor signing is now enabled by default unless `receptor_disable_signing=true` is set, and a RSA key pair (public/private) is generated by the installer. However, you can provide custom RSA public/private keys by setting the path variable.
+Receptor signing is enabled by default unless `receptor_disable_signing=true` is set, and an RSA key pair (public and private) is generated by the installation program. However, you can set custom RSA public and private keys by using the following variables:

----
-receptor_signing_private_key=/full/path/to/private/key
-receptor_signing_public_key=/full/path/to/public/key
+receptor_signing_private_key=
+receptor_signing_public_key=
----
diff --git a/downstream/modules/platform/ref-using-custom-tls-certificates.adoc b/downstream/modules/platform/ref-using-custom-tls-certificates.adoc
index e8b4a03e82..a241a7ad3a 100644
--- a/downstream/modules/platform/ref-using-custom-tls-certificates.adoc
+++ b/downstream/modules/platform/ref-using-custom-tls-certificates.adoc
@@ -8,41 +8,58 @@

[role="_abstract"]

-By default, the installer generates TLS certificates and keys for all services which are signed by a custom Certificate Authority (CA). You can provide a custom TLS certificate/key for each service. If that certificate is signed by a custom CA, you must provide the CA TLS certificate and key.
+By default, the installation program generates self-signed TLS certificates and keys for all {PlatformNameShort} services.

-* Certificate Authority
-----
-ca_tls_cert=/full/path/to/tls/certificate
-ca_tls_key=/full/path/to/tls/key
-----
+If you want the generated certificates to be signed by your own Certificate Authority (CA) instead, then set the following inventory file variables:

-* Automation Controller
----
-controller_tls_cert=/full/path/to/tls/certificate
-controller_tls_key=/full/path/to/tls/key
+ca_tls_cert=
+ca_tls_key=
----

-* Automation Hub
-----
-hub_tls_cert=/full/path/to/tls/certificate
-hub_tls_key=/full/path/to/tls/key
-----
+If you want to use your own TLS certificates and keys for each service (for example {ControllerName}, {HubName}, {EDAName}), then set the following inventory file variables:

-* Automation EDA
-----
-eda_tls_cert=/full/path/to/tls/certificate
-eda_tls_key=/full/path/to/tls/key
+[source,ini,subs="+attributes"]
----
+# {GatewayStart}
+gateway_tls_cert=
+gateway_tls_key=
+gateway_pg_tls_cert=
+gateway_pg_tls_key=
+gateway_redis_tls_cert=
+gateway_redis_tls_key=

-* Postgresql
-----
-postgresql_tls_cert=/full/path/to/tls/certificate
-postgresql_tls_key=/full/path/to/tls/key
-----
+# {ControllerNameStart}
+controller_tls_cert=
+controller_tls_key=
+controller_pg_tls_cert=
+controller_pg_tls_key=

-* Receptor
-----
-receptor_tls_cert=/full/path/to/tls/certificate
-receptor_tls_key=/full/path/to/tls/key
+# {HubNameStart}
+hub_tls_cert=
+hub_tls_key=
+hub_pg_tls_cert=
+hub_pg_tls_key=
+
+# {EDAName}
+eda_tls_cert=
+eda_tls_key=
+eda_pg_tls_cert=
+eda_pg_tls_key=
+eda_redis_tls_cert=
+eda_redis_tls_key=
+
+# PostgreSQL
+postgresql_tls_cert=
+postgresql_tls_key=
+
+# Receptor
+receptor_tls_cert=
+receptor_tls_key=
----
+If any of your certificates are signed by a custom Certificate Authority (CA), then you must specify the Certificate Authority's certificate by using the `custom_ca_cert` inventory file variable:
+
+----
+custom_ca_cert=
+----
\ No newline at end of file
diff --git a/downstream/modules/playbooks/ref-create-variables.adoc b/downstream/modules/playbooks/ref-create-variables.adoc
index ad3955e543..15d9d514a4 100644
--- a/downstream/modules/playbooks/ref-create-variables.adoc
+++ b/downstream/modules/playbooks/ref-create-variables.adoc
@@ -29,4 +29,4 @@ webservers:
   vars:
     ansible_user: my_server_user
----
-For more information about inventories and Ansible inventory variables, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/about_the_installer_inventory_file[About the Installer Inventory file] and link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/appendix-inventory-files-vars[Inventory file variables].
+For more information about inventories and Ansible inventory variables, see link:{URLPlanningGuide}/about_the_installer_inventory_file[About the Installer Inventory file] and link:{URLInstallationGuide}/appendix-inventory-files-vars[Inventory file variables].
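+
+Building on the example above, a variable set directly on a host overrides the same variable set on the host's group. The following sketch is illustrative only; the hostnames and the `emergency_user` value are hypothetical:
+
+----
+webservers:
+  hosts:
+    web01.example.com:
+      # Host-level value: overrides the group-level ansible_user for this host only
+      ansible_user: emergency_user
+    web02.example.com:
+      # No host-level value: this host inherits ansible_user from the group vars
+  vars:
+    ansible_user: my_server_user
+----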
diff --git a/downstream/modules/playbooks/ref-playbook-execution.adoc b/downstream/modules/playbooks/ref-playbook-execution.adoc
index 703cbb8392..c4466aaac8 100644
--- a/downstream/modules/playbooks/ref-playbook-execution.adoc
+++ b/downstream/modules/playbooks/ref-playbook-execution.adoc
@@ -11,7 +11,7 @@

At a minimum, each play defines two things:

* the managed nodes to target, using a pattern
* at least one task to execute

-[Note]
+[NOTE]
====
In Ansible 2.10 and later, use the fully-qualified collection name in your playbooks to ensure the correct module is selected, because multiple collections can contain modules with the same name (for example, `user`).
====
diff --git a/downstream/modules/topologies/ref-cont-a-env-a.adoc b/downstream/modules/topologies/ref-cont-a-env-a.adoc
new file mode 100644
index 0000000000..d2786f32bc
--- /dev/null
+++ b/downstream/modules/topologies/ref-cont-a-env-a.adoc
@@ -0,0 +1,78 @@
+[id="cont-a-env-a"]
+= Container growth topology
+
+The growth topology is intended for organizations that are getting started with {PlatformNameShort} and do not require redundancy or higher compute for large volumes of automation. This topology allows for smaller footprint deployments.
+
+== Infrastructure topology
+The following diagram outlines the infrastructure topology that Red{nbsp}Hat has tested with this deployment model that customers can use when self-managing {PlatformNameShort}:
+
+.Infrastructure topology diagram
+image::cont-a-env-a.png[Container growth topology diagram]
+
+A single VM has been tested with the following component requirements: 16 GB RAM, 4 CPUs, 60 GB local disk, and 3000 IOPS. Resources, such as storage, can be increased based on the needs of the deployment.
+
+If you are performing a bundled installation with `hub_seed_collections=true`, then 32 GB of RAM is recommended. Note that with this configuration the installation time increases, and seeding the collections alone can take 45 minutes or more.
+
+.Infrastructure topology
+[options="header"]
+|====
+| Purpose | Example group names
+| All {PlatformNameShort} components
+a|
+* `automationgateway`
+* `automationcontroller`
+* `automationhub`
+* `automationeda`
+* `database`
+|====
+
+== Tested system configurations
+
+Red{nbsp}Hat has tested the following configurations to install and run {PlatformName}:
+
+.Tested system configurations
+[options="header"]
+|====
+| Type | Description
+| Subscription
+a|
+* Valid {PlatformName} subscription
+* Valid {RHEL} subscription (to consume the BaseOS and AppStream repositories)
+| Operating system | {RHEL} 9.2 or later
+| CPU architecture | x86_64, AArch64, s390x (IBM Z), ppc64le (IBM Power)
+| Ansible-core | Ansible-core version {CoreInstVers} or later
+| Browser | A currently supported version of Mozilla Firefox or Google Chrome
+| Database | {PostgresVers}
+|====
+
+== Network ports
+
+{PlatformName} uses several ports to communicate with its services. These ports must be open and available for incoming connections to the {PlatformName} server for it to work. Ensure that these ports are available and are not blocked by the server firewall.
+
+.Network ports and protocols
+[options="header"]
+|====
+| Port number | Protocol | Service | Source | Destination
+| 80/443 | TCP | HTTP/HTTPS | {EDAName} | {HubNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {EDAName} | {ControllerNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {ControllerNameStart} | {HubNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {ControllerNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {HubNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {EDAName}
+| 5432 | TCP | PostgreSQL | {EDAName} | External database
+| 5432 | TCP | PostgreSQL | {GatewayStart} | External database
+| 5432 | TCP | PostgreSQL | {HubNameStart} | External database
+| 5432 | TCP | PostgreSQL | {ControllerNameStart} | External database
+| 27199 | TCP | Receptor | {ControllerNameStart} | Execution container
+| 6379 | TCP | Redis | {EDAName} | Redis container
+| 6379 | TCP | Redis | {GatewayStart} | Redis container
+| 8443 | TCP | HTTPS | {GatewayStart} | {GatewayStart}
+| 50051 | TCP | gRPC | {GatewayStart} | {GatewayStart}
+|====
+
+== Example growth inventory file
+Use the example inventory file to perform an installation for this topology:
+
+include::snippets/inventory-cont-a-env-a.adoc[]
+
+SSH keys are required only when installing on remote hosts. If you are performing a self-contained installation on a local VM, you can use `ansible_connection=local`.
diff --git a/downstream/modules/topologies/ref-cont-b-env-a.adoc b/downstream/modules/topologies/ref-cont-b-env-a.adoc
new file mode 100644
index 0000000000..8dd0c62d2f
--- /dev/null
+++ b/downstream/modules/topologies/ref-cont-b-env-a.adoc
@@ -0,0 +1,94 @@
+[id="cont-b-env-a"]
+= Container enterprise topology
+
+The enterprise topology is intended for organizations that require {PlatformNameShort} to be deployed with redundancy or higher compute for large volumes of automation.
+
+== Infrastructure topology
+The following diagram outlines the infrastructure topology that Red{nbsp}Hat has tested with this deployment model that customers can use when self-managing {PlatformNameShort}:
+
+.Infrastructure topology diagram
+image::cont-b-env-a.png[Container enterprise topology diagram]
+
+Each VM has been tested with the following component requirements: 16 GB RAM, 4 CPUs, 60 GB local disk, and 3000 IOPS.
+ +.Infrastructure topology +[options="header"] +|==== +| VM count | Purpose | Example VM group names +| 2 | {GatewayStart} with colocated Redis | `automationgateway` +| 2 | {ControllerNameStart} | `automationcontroller` +| 2 | {PrivateHubNameStart} with colocated Redis | `automationhub` +| 2 | {EDAName} with colocated Redis | `automationeda` +| 1 | {AutomationMeshStart} hop node | `execution_nodes` +| 2 | {AutomationMeshStart} execution node | `execution_nodes` +| 1 | Externally managed database service | N/A +| 1 | HAProxy load balancer in front of {Gateway} (externally managed) | N/A +|==== + +[NOTE] +==== +include::snippets/redis-colocation-containerized.adoc[] +==== + +== Tested system configurations + +Red{nbsp}Hat has tested the following configurations to install and run {PlatformName}: + +.Tested system configurations +[options="header"] +|==== +| Type | Description +| Subscription +a| +* Valid {PlatformName} subscription +* Valid {RHEL} subscription (to consume the BaseOS and AppStream repositories) +| Operating system | {RHEL} 9.2 or later +| CPU architecture | x86_64, AArch64, s390x (IBM Z), ppc64le (IBM Power) +| Ansible-core | Ansible-core version {CoreInstVers} or later +| Browser | A currently supported version of Mozilla Firefox or Google Chrome. +| Database | {PostgresVers} +|==== + +== Network ports + +{PlatformName} uses several ports to communicate with its services. These ports must be open and available for incoming connections to the {PlatformName} server for it to work. Ensure that these ports are available and are not blocked by the server firewall. + +.Network ports and protocols +[options="header"] +|==== +| Port number | Protocol | Service | Source | Destination +| 80/443 | TCP | HTTP/HTTPS | {EDAName} | {HubNameStart} +| 80/443 | TCP | HTTP/HTTPS | {EDAName} | {ControllerNameStart} +| 80/443 | TCP | HTTP/HTTPS | {ControllerNameStart} | {HubNameStart} +| 80/443 | TCP | HTTP/HTTPS | HAProxy load balancer | {GatewayStart} +| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {ControllerNameStart} +| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {HubNameStart} +| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {EDAName} +| 5432 | TCP | PostgreSQL | {EDAName} | External database +| 5432 | TCP | PostgreSQL | {GatewayStart} | External database +| 5432 | TCP | PostgreSQL | {HubNameStart} | External database +| 5432 | TCP | PostgreSQL | {ControllerNameStart} | External database +| 27199 | TCP | Receptor | {ControllerNameStart} | Hop node and execution node +| 27199 | TCP | Receptor | Hop node | Execution node +| 6379 | TCP | Redis | {EDAName} | Redis node +| 6379 | TCP | Redis | {GatewayStart} | Redis node +| 16379 | TCP | Redis | Redis node | Redis node +| 8443 | TCP | HTTPS | {GatewayStart} | {GatewayStart} +| 50051 | TCP | gRPC | {GatewayStart} | {GatewayStart} +|==== + +== Example enterprise inventory file +Use the example inventory file to perform an installation for this topology: + +include::snippets/inventory-cont-b-env-a.adoc[] + +== Storage requirements +* Execution environments are pulled into {ControllerName} hybrid nodes and execution nodes that run jobs. The size of these containers influences the storage requirements for `$PATH_WHERE_PODMAN_PUTS_CONTAINER_IMAGES`. 
+ +* The primary determining factors for the size of the database and its storage volume, which defaults to `$POSTGRES_DEFAULT_DATA_DIR`, are: +** The quantity of job events (lines of output from {ControllerName} jobs) +** The quantity of days of job data that are retained + +* On execution nodes and {ControllerName} control and hybrid nodes, job output is buffered to the disk in `$NAME_OF_RECEPTOR_DIR_VAR`, which defaults to `/tmp`. + +* The size and quantity of collections synced to {HubName} influence the storage requirements of `$PATH_WHERE_PULP_STORES_COLLECTIONS`. diff --git a/downstream/modules/topologies/ref-mesh-nodes.adoc b/downstream/modules/topologies/ref-mesh-nodes.adoc new file mode 100644 index 0000000000..b761967c73 --- /dev/null +++ b/downstream/modules/topologies/ref-mesh-nodes.adoc @@ -0,0 +1,20 @@ +[id="mesh-nodes"] += {AutomationMeshStart} nodes + +{AutomationMeshStart} is an overlay network intended to ease the distribution of work across a large and dispersed collection of workers. This is done through nodes that establish peer-to-peer connections with each other by using existing networks. + +== Tested system configurations +Each {AutomationMesh} VM has been tested with the following component requirements: 16 GB RAM, 4 CPUs, 60 GB local disk, and 3000 IOPS. + +== Network ports +{AutomationMeshStart} uses several ports to communicate with its services. These ports must be open and available for incoming connections to the {PlatformName} server for it to work. Ensure that these ports are available and are not blocked by the server firewall. + +.Network ports and protocols +[options="header"] +|==== +| Port number | Protocol | Service | Source | Destination +| 27199 | TCP | Receptor | {OCPShort} cluster | Execution node +| 27199 | TCP | Receptor | {OCPShort} cluster | Hop node +| 80/443 | HTTP/HTTPS | Receptor | Execution node | {OCPShort} mesh ingress +| 80/443 | HTTP/HTTPS | Receptor | Hop node | {OCPShort} mesh ingress +|==== \ No newline at end of file diff --git a/downstream/modules/topologies/ref-ocp-a-env-a.adoc b/downstream/modules/topologies/ref-ocp-a-env-a.adoc new file mode 100644 index 0000000000..fe382439c7 --- /dev/null +++ b/downstream/modules/topologies/ref-ocp-a-env-a.adoc @@ -0,0 +1,105 @@ +[id="ocp-a-env-a"] += Operator growth topology + +The growth topology is intended for organizations that are getting started with {PlatformNameShort} and do not require redundancy or higher compute for large volumes of automation. This topology allows for smaller footprint deployments. + +== Infrastructure topology +The following diagram outlines the infrastructure topology that Red{nbsp}Hat has tested with this deployment model that customers can use when self-managing {PlatformNameShort}: + +.Infrastructure topology diagram +image::ocp-a-env-a.png[Operator growth topology diagram] + +A Single Node OpenShift (SNO) cluster has been tested with the following requirements: 32 GB RAM, 16 CPUs, 128 GB local disk, and 3000 IOPS. 
+
+.Infrastructure topology
+[options="header"]
+|====
+| Count | Component
+| 1 | {ControllerNameStart} web pod
+| 1 | {ControllerNameStart} task pod
+| 1 | {HubNameStart} API pod
+| 2 | {HubNameStart} content pod
+| 2 | {HubNameStart} worker pod
+| 1 | {HubNameStart} Redis pod
+| 1 | {EDAName} API pod
+| 1 | {EDAName} activation worker pod
+| 1 | {EDAName} default worker pod
+| 1 | {EDAName} event stream pod
+| 1 | {EDAName} scheduler pod
+| 1 | {GatewayStart} pod
+| 1 | Database pod
+| 1 | Redis pod
+|====
+
+[NOTE]
+====
+You can deploy multiple isolated instances of {PlatformNameShort} into the same {OCP} cluster by using a namespace-scoped deployment model.
+This approach allows you to use the same cluster for several deployments.
+====
+
+== Tested system configurations
+
+Red{nbsp}Hat has tested the following configurations to install and run {PlatformName}:
+
+.Tested system configurations
+[options="header"]
+|====
+| Type | Description
+| Subscription | Valid {PlatformName} subscription
+| Operating system | {RHEL} 9.2 or later
+| CPU architecture | x86_64, AArch64, s390x (IBM Z), ppc64le (IBM Power)
+| Red Hat OpenShift
+a|
+* Version: 4.14
+* num_of_control_nodes: 1
+* num_of_worker_nodes: 1
+| Ansible-core | Ansible-core version {CoreInstVers} or later
+| Browser | A currently supported version of Mozilla Firefox or Google Chrome.
+| Database | {PostgresVers}
+|====
+
+== Example custom resource file
+
+Use the following example custom resource (CR) to add your {PlatformNameShort} instance to your project:
+
+====
+----
+apiVersion: aap.ansible.com/v1alpha1
+kind: AnsibleAutomationPlatform
+metadata:
+  name:
+spec:
+  eda:
+    automation_server_ssl_verify: 'no'
+  hub:
+    storage_type: 's3'
+    object_storage_s3_secret: ''
+----
+====
+
+== Nonfunctional requirements
+
+{PlatformNameShort}'s performance characteristics and capacity are impacted by its resource allocation and configuration. With OpenShift, each {PlatformNameShort} component is deployed as a pod. You can specify resource requests and limits for each pod.
+
+Use the {PlatformNameShort} Custom Resource (CR) to configure resource allocation for OpenShift installations. Each configurable item has default settings. These settings are the minimum requirements for an installation, but might not meet your production workload needs.
+
+By default, each component's deployments are set for minimum resource requests but no resource limits. OpenShift schedules pods based on their resource requests and available node capacity, but because no limits are set, the pods can consume unlimited RAM or CPU provided that the OpenShift worker node itself is not under node pressure.
+
+In the Operator growth topology, {PlatformNameShort} is deployed on a Single Node OpenShift (SNO) with 32 GB RAM, 16 CPUs, 128 GB local disk, and 3000 IOPS. This is not a shared environment, so {PlatformNameShort} pods have full access to all of the compute resources of the OpenShift SNO. In this scenario, the capacity calculation for the {ControllerName} task pods is derived from the underlying {OCPShort} node that runs the pod, even though the pod does not have access to the CPU or memory resources of the entire node. This capacity calculation influences how many concurrent jobs {ControllerName} can run.
+
+OpenShift manages storage distinctly from VMs. This impacts how {HubName} stores its artifacts. In the Operator growth topology, we use S3 storage because {HubName} requires a `ReadWriteMany` type storage, which is not a default storage type in OpenShift.
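+
+For example, the following sketch raises the resource requests for the {ControllerName} task and web containers through the CR. The `task_resource_requirements` and `web_resource_requirements` field names follow the AWX-based {ControllerName} operator and are assumptions here; verify them against the operator version installed in your cluster before using them:
+
+----
+apiVersion: aap.ansible.com/v1alpha1
+kind: AnsibleAutomationPlatform
+metadata:
+  name: myaap
+spec:
+  controller:
+    disabled: false
+    # Assumed pass-through fields to the automation controller operator;
+    # verify these names against your operator version.
+    task_resource_requirements:
+      requests:
+        cpu: 500m
+        memory: 2Gi
+    web_resource_requirements:
+      requests:
+        cpu: 250m
+        memory: 1Gi
+----
+
+Because the requests influence both scheduling and the capacity calculation described above, raising them reserves compute for job execution rather than relying on unbounded consumption.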
+ +== Network ports + +{PlatformName} uses several ports to communicate with its services. These ports must be open and available for incoming connections to the {PlatformName} server for it to work. Ensure that these ports are available and are not blocked by the server firewall. + +.Network ports and protocols +[options="header"] +|==== +| Port number | Protocol | Service | Source | Destination +| 27199 | TCP | Receptor | {OCPShort} cluster | Execution node +| 27199 | TCP | Receptor | {OCPShort} cluster | Hop node +| 80/443 | HTTP/HTTPS | Receptor | Execution node | {OCPShort} ingress +| 80/443 | HTTP/HTTPS | Receptor | Hop node | {OCPShort} ingress +| 80/443 | HTTP/HTTPS | Platform | Customer clients | {OCPShort} ingress +|==== diff --git a/downstream/modules/topologies/ref-ocp-b-env-a.adoc b/downstream/modules/topologies/ref-ocp-b-env-a.adoc new file mode 100644 index 0000000000..e4b95bb584 --- /dev/null +++ b/downstream/modules/topologies/ref-ocp-b-env-a.adoc @@ -0,0 +1,115 @@ +[id="ocp-b-env-a"] += Operator enterprise topology + +The enterprise topology is intended for organizations that require {PlatformNameShort} to be deployed with redundancy or higher compute for large volumes of automation. + +== Infrastructure topology + +The following diagram outlines the infrastructure topology that Red{nbsp}Hat has tested with this deployment model that customers can use when self-managing {PlatformNameShort}: + +.Infrastructure topology diagram +image::ocp-b-env-a.png[Operator enterprise topology diagram] + +The following infrastructure topology describes an OpenShift Cluster with 3 primary nodes and 2 worker nodes. + +Each OpenShift Worker node has been tested with the following component requirements: 16 GB RAM, 4 CPUs, 128 GB local disk, and 3000 IOPS. + +.Infrastructure topology +[options="header"] +|==== +| Count | Component +| 1 | {ControllerNameStart} web pod +| 1 | {ControllerNameStart} task pod +| 1 | {HubNameStart} API pod +| 2 | {HubNameStart} content pod +| 2 | {HubNameStart} worker pod +| 1 | {HubNameStart} Redis pod +| 1 | {EDAName} API pod +| 2 | {EDAName} activation worker pod +| 2 | {EDAName} default worker pod +| 2 | {EDAName} event stream pod +| 1 | {EDAName} scheduler pod +| 1 | {GatewayStart} pod +| 2 | Mesh ingress pod +| N/A | Externally managed database service +| N/A | Externally managed Redis +| N/A | Externally managed object storage service (for {HubName}) +|==== + +== Tested system configurations + +Red{nbsp}Hat has tested the following configurations to install and run {PlatformName}: + +.Tested system configurations +[options="header"] +|==== +| Type | Description +| Subscription | Valid {PlatformName} subscription +| Operating system | {RHEL} 9.2 or later +| CPU architecture | x86_64, AArch64, s390x (IBM Z), ppc64le (IBM Power) +| Red Hat OpenShift +a| +* Red Hat OpenShift on AWS Hosted Control Planes 4.15.16 +** 2 worker nodes in different availability zones (AZs) at t3.xlarge +| Ansible-core | Ansible-core version {CoreInstVers} or later +| Browser | A currently supported version of Mozilla Firefox or Google Chrome. 
+| AWS RDS PostgreSQL service
+a|
+* engine: "postgres"
+* engine_version: "15"
+* parameter_group_name: "default.postgres15"
+* allocated_storage: 20
+* max_allocated_storage: 1000
+* storage_type: "gp2"
+* storage_encrypted: true
+* instance_class: "db.t4g.small"
+* multi_az: true
+* backup_retention_period: 5
+* database: must have ICU support
+| AWS ElastiCache service
+a|
+* engine: "redis"
+* engine_version: "6.2"
+* auto_minor_version_upgrade: "false"
+* node_type: "cache.t3.micro"
+* parameter_group_name: "default.redis6.x.cluster.on"
+* transit_encryption_enabled: "true"
+* num_node_groups: 2
+* replicas_per_node_group: 1
+* automatic_failover_enabled: true
+| S3 storage | HTTPS only, accessible through an AWS role assigned to the {HubName} service account (SA) at runtime by using AWS Pod Identity
+|====
+
+// == Example custom resource file
+
+// Use the following example custom resource (CR) to add your {PlatformNameShort} instance to your project:
+
+== Nonfunctional requirements
+
+{PlatformNameShort}'s performance characteristics and capacity are impacted by its resource allocation and configuration. With OpenShift, each {PlatformNameShort} component is deployed as a pod. You can specify resource requests and limits for each pod.
+
+Use the {PlatformNameShort} custom resource to configure resource allocation for OpenShift installations. Each configurable item has default settings. These settings are the exact configuration used within the context of this reference deployment architecture and presume that the environment is being deployed and managed by an Enterprise IT organization for production purposes.
+
+By default, each component's deployments are set for minimum resource requests but no resource limits. OpenShift schedules pods based on their resource requests and available node capacity, but because no limits are set, the pods can consume unlimited RAM or CPU provided that the OpenShift worker node itself is not under node pressure.
+
+In the Operator enterprise topology, {PlatformNameShort} is deployed on a Red Hat OpenShift on AWS (ROSA) Hosted Control Plane (HCP) cluster with 2 t3.xlarge worker nodes spread across 2 AZs within a single AWS Region. This is not a shared environment, so {PlatformNameShort} pods have full access to all of the compute resources of the ROSA HCP cluster. In this scenario, the capacity calculation for the {ControllerName} task pods is derived from the underlying HCP worker node that runs the pod, even though the pod does not have access to the CPU or memory resources of the entire node. This capacity calculation influences how many concurrent jobs {ControllerName} can run.
+
+OpenShift manages storage distinctly from VMs. This impacts how {HubName} stores its artifacts. In the Operator enterprise topology, we use S3 storage because {HubName} requires a `ReadWriteMany` type storage, which is not a default storage type in OpenShift. Externally provided Redis, PostgreSQL, and object storage for {HubName} are specified. This provides the {PlatformNameShort} deployment with additional scalability and reliability features, including specialized backup, restore, and replication services and scalable storage.
+
+
+== Network ports
+
+{PlatformName} uses several ports to communicate with its services. These ports must be open and available for incoming connections to the {PlatformName} server for it to work. Ensure that these ports are available and are not blocked by the server firewall.
+ +.Network ports and protocols +[options="header"] +|==== +| Port number | Protocol | Service | Source | Destination +| 5432 | TCP | PostgreSQL | {OCPShort} cluster | External database service +| 6379 | TCP | Redis | {OCPShort} cluster | External Redis service +| 80/443 | HTTP/HTTPS | Object storage | {OCPShort} cluster | External object storage service +| 27199 | TCP | Receptor | {OCPShort} cluster | Execution node +| 27199 | TCP | Receptor | {OCPShort} cluster | Hop node +| 80/443 | HTTP/HTTPS | Receptor | Execution node | {OCPShort} ingress +| 80/443 | HTTP/HTTPS | Receptor | Hop node | {OCPShort} ingress +|==== diff --git a/downstream/modules/topologies/ref-rpm-a-env-a.adoc b/downstream/modules/topologies/ref-rpm-a-env-a.adoc new file mode 100644 index 0000000000..50be172834 --- /dev/null +++ b/downstream/modules/topologies/ref-rpm-a-env-a.adoc @@ -0,0 +1,73 @@ +[id="rpm-a-env-a"] += RPM growth topology + +The growth topology is intended for organizations that are getting started with {PlatformNameShort} and do not require redundancy or higher compute for large volumes of automation. This topology allows for smaller footprint deployments. + +== Infrastructure topology +The following diagram outlines the infrastructure topology that Red{nbsp}Hat has tested with this deployment model that customers can use when self-managing {PlatformNameShort}: + +.Infrastructure topology diagram +image::rpm-a-env-a.png[RPM growth topology diagram] + +Each virtual machine (VM) has been tested with the following component requirements: 16 GB RAM, 4 CPUs, 60 GB local disk, and 3000 IOPS. + +.Infrastructure topology +[options="header"] +|==== +| VM count | Purpose | Example VM group names +| 1 | {GatewayStart} with colocated Redis | `automationgateway` +| 1 | {ControllerNameStart} | `automationcontroller` +| 1 | {PrivateHubNameStart} | `automationhub` +| 1 | {EDAName} | `automationedacontroller` +| 1 | {AutomationMeshStart} execution node | `execution_nodes` +| 1 | Database | `database` +|==== + +== Tested system configurations + +Red{nbsp}Hat has tested the following configurations to install and run {PlatformName}: + +.Tested system configurations +[options="header"] +|==== +| Type | Description +| Subscription | Valid {PlatformName} subscription +| Operating system | {RHEL} 8.8 or later, or {RHEL} 9.2 or later +| CPU architecture | x86_64, AArch64, s390x (IBM Z), ppc64le (IBM Power) +| Ansible-core | Ansible-core version {CoreInstVers} or later +| Browser | A currently supported version of Mozilla Firefox or Google Chrome +| Database | {PostgresVers} +|==== + +== Network ports + +{PlatformName} uses several ports to communicate with its services. These ports must be open and available for incoming connections to the {PlatformName} server for it to work. Ensure that these ports are available and are not blocked by the server firewall. 
.Network ports and protocols
+[options="header"]
+|====
+| Port number | Protocol | Service | Source | Destination
+| 80/443 | TCP | HTTP/HTTPS | {EDAName} | {HubNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {EDAName} | {ControllerNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {ControllerNameStart} | {HubNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {ControllerNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {HubNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {EDAName}
+| 5432 | TCP | PostgreSQL | {EDAName} | Database
+| 5432 | TCP | PostgreSQL | {GatewayStart} | Database
+| 5432 | TCP | PostgreSQL | {HubNameStart} | Database
+| 5432 | TCP | PostgreSQL | {ControllerNameStart} | Database
+| 27199 | TCP | Receptor | {ControllerNameStart} | Execution node
+| 6379 | TCP | Redis | {EDAName} | Redis node
+| 6379 | TCP | Redis | {GatewayStart} | Redis node
+| 8443 | TCP | HTTPS | {GatewayStart} | {GatewayStart}
+| 50051 | TCP | gRPC | {GatewayStart} | {GatewayStart}
+|====
+
+== Example growth inventory file
+Use the example inventory file to perform an installation for this topology:
+
+include::snippets/inventory-rpm-a-env-a.adoc[]
+
+
+
diff --git a/downstream/modules/topologies/ref-rpm-a-env-b.adoc b/downstream/modules/topologies/ref-rpm-a-env-b.adoc
new file mode 100644
index 0000000000..80788091ca
--- /dev/null
+++ b/downstream/modules/topologies/ref-rpm-a-env-b.adoc
@@ -0,0 +1,72 @@
+[id="rpm-a-env-b"]
+= RPM mixed growth topology
+
+The growth topology is intended for organizations that are getting started with {PlatformNameShort} and do not require redundancy or higher compute for large volumes of automation. This topology allows for smaller footprint deployments. The mixed topology deploys components from different {PlatformNameShort} versions and is intended for configuring a new installation of {EDAcontroller} 1.1 with {ControllerName} 4.4 or 4.5.
+
+== Infrastructure topology
+The following diagram outlines the infrastructure topology that Red{nbsp}Hat has tested with this deployment model that customers can use when self-managing {PlatformNameShort}:
+
+.Infrastructure topology diagram
+image::rpm-a-env-b.png[RPM mixed growth topology diagram]
+
+Each virtual machine (VM) has been tested with the following component requirements: 16 GB RAM, 4 CPUs, 60 GB local disk, and 3000 IOPS.
+
+.Infrastructure topology
+[options="header"]
+|====
+| VM count | Purpose | {PlatformNameShort} version | Example VM group names
+| 1 | {GatewayStart} with colocated Redis | 2.5 | `automationgateway`
+| 1 | {ControllerNameStart} | 2.4 | `automationcontroller`
+| 1 | {PrivateHubNameStart} | 2.4 | `automationhub`
+| 1 | {EDAName} | 2.5 | `automationedacontroller`
+| 1 | {AutomationMeshStart} execution node | 2.4 | `execution_nodes`
+| 1 | Database | 2.4 | `database`
+|====
+
+== Tested system configurations
+
+Red{nbsp}Hat has tested the following configurations to install and run {PlatformName}:
+
+.Tested system configurations
+[options="header"]
+|====
+| Type | Description
+| Subscription | Valid {PlatformName} subscription
+| Operating system | {RHEL} 8.8 or later, or {RHEL} 9.2 or later
+| CPU architecture | x86_64, AArch64
+| Ansible-core | Ansible-core version {CoreInstVers} or later
+| Browser | A currently supported version of Mozilla Firefox or Google Chrome
+| Database | {PostgresVers}
+|====
+
+== Network ports
+
+{PlatformName} uses several ports to communicate with its services. These ports must be open and available for incoming connections to the {PlatformName} server for it to work.
Ensure that these ports are available and are not blocked by the server firewall. + +.Network ports and protocols +[options="header"] +|==== +| Port number | Protocol | Service | Source | Destination +| 80/443 | TCP | HTTP/HTTPS | {EDAName} | {HubNameStart} +| 80/443 | TCP | HTTP/HTTPS | {EDAName} | {ControllerNameStart} +| 80/443 | TCP | HTTP/HTTPS | {ControllerNameStart} | {HubNameStart} +| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {ControllerNameStart} +| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {HubNameStart} +| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {EDAName} +| 5432 | TCP | PostgreSQL | {EDAName} | Database +| 5432 | TCP | PostgreSQL | {GatewayStart} | Database +| 5432 | TCP | PostgreSQL | {HubNameStart} | Database +| 5432 | TCP | PostgreSQL | {ControllerNameStart} | Database +| 27199 | TCP | Receptor | {ControllerNameStart} | Execution node +| 6379 | TCP | Redis | {EDAName} | Redis node +| 6379 | TCP | Redis | {GatewayStart} | Redis node +| 8443 | TCP | HTTPS | {GatewayStart} | {GatewayStart} +| 50051 | TCP | gRPC | {GatewayStart} | {GatewayStart} +|==== + +== Example mixed growth inventory file + +Use the example inventory file to perform an installation for this topology: + +include::snippets/inventory-rpm-a-env-b.adoc[] + diff --git a/downstream/modules/topologies/ref-rpm-b-env-a.adoc b/downstream/modules/topologies/ref-rpm-b-env-a.adoc new file mode 100644 index 0000000000..57268cbab8 --- /dev/null +++ b/downstream/modules/topologies/ref-rpm-b-env-a.adoc @@ -0,0 +1,80 @@ +[id="rpm-b-env-a"] += RPM enterprise topology + +The enterprise topology is intended for organizations that require {PlatformNameShort} to be deployed with redundancy or higher compute for large volumes of automation. + +== Infrastructure topology +The following diagram outlines the infrastructure topology that Red{nbsp}Hat has tested with this deployment model that customers can use when self-managing {PlatformNameShort}: + +.Infrastructure topology diagram +image::rpm-b-env-a.png[RPM enterprise topology diagram] + +Each virtual machine (VM) has been tested with the following component requirements: 16 GB RAM, 4 CPUs, 60 GB local disk, and 3000 IOPS. + +.Infrastructure topology +[options="header"] +|==== +| VM count | Purpose | Example VM group names +| 2 | {GatewayStart} with colocated Redis | `automationgateway` +| 2 | {ControllerNameStart} | `automationcontroller` +| 2 | {PrivateHubNameStart} with colocated Redis | `automationhub` +| 2 | {EDAName} with colocated Redis | `automationedacontroller` +| 1 | {AutomationMeshStart} hop node | `execution_nodes` +| 2 | {AutomationMeshStart} execution node | `execution_nodes` +| 1 | Externally managed database service | N/A +| 1 | HAProxy load balancer in front of {Gateway} (externally managed) | N/A +|==== + +[NOTE] +==== +6 VMs are required for a Redis high availability (HA) compatible deployment. Redis can be colocated on each {PlatformNameShort} component VM except for {ControllerName}, execution nodes, or the PostgreSQL database. 
+====
+
+== Tested system configurations
+
+Red{nbsp}Hat has tested the following configurations to install and run {PlatformName}:
+
+.Tested system configurations
+[options="header"]
+|====
+| Type | Description
+| Subscription | Valid {PlatformName} subscription
+| Operating system | {RHEL} 8.8 or later, or {RHEL} 9.2 or later
+| CPU architecture | x86_64, AArch64, s390x (IBM Z), ppc64le (IBM Power)
+| Ansible-core | Ansible-core version {CoreInstVers} or later
+| Browser | A currently supported version of Mozilla Firefox or Google Chrome
+| Database | {PostgresVers}
+|====
+
+== Network ports
+
+{PlatformName} uses several ports to communicate with its services. These ports must be open and available for incoming connections to the {PlatformName} server for it to work. Ensure that these ports are available and are not blocked by the server firewall.
+
+.Network ports and protocols
+[options="header"]
+|====
+| Port number | Protocol | Service | Source | Destination
+| 80/443 | TCP | HTTP/HTTPS | {EDAName} | {HubNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {EDAName} | {ControllerNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {ControllerNameStart} | {HubNameStart}
+| 80/443 | TCP | HTTP/HTTPS | HAProxy load balancer | {GatewayStart}
+| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {ControllerNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {HubNameStart}
+| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {EDAName}
+| 5432 | TCP | PostgreSQL | {EDAName} | External database
+| 5432 | TCP | PostgreSQL | {GatewayStart} | External database
+| 5432 | TCP | PostgreSQL | {HubNameStart} | External database
+| 5432 | TCP | PostgreSQL | {ControllerNameStart} | External database
+| 27199 | TCP | Receptor | {ControllerNameStart} | Hop node and execution node
+| 27199 | TCP | Receptor | Hop node | Execution node
+| 6379 | TCP | Redis | {EDAName} | Redis node
+| 6379 | TCP | Redis | {GatewayStart} | Redis node
+| 16379 | TCP | Redis | Redis node | Redis node
+| 8443 | TCP | HTTPS | {GatewayStart} | {GatewayStart}
+| 50051 | TCP | gRPC | {GatewayStart} | {GatewayStart}
+|====
+
+== Example enterprise inventory file
+Use the example inventory file to perform an installation for this topology:
+
+include::snippets/inventory-rpm-b-env-a.adoc[]
diff --git a/downstream/modules/topologies/ref-rpm-b-env-b.adoc b/downstream/modules/topologies/ref-rpm-b-env-b.adoc
new file mode 100644
index 0000000000..314efdb586
--- /dev/null
+++ b/downstream/modules/topologies/ref-rpm-b-env-b.adoc
@@ -0,0 +1,80 @@
+[id="rpm-b-env-b"]
+= RPM mixed enterprise topology
+
+The enterprise topology is intended for organizations that require {PlatformNameShort} to be deployed with redundancy or higher compute for large volumes of automation. The mixed topology deploys components from different {PlatformNameShort} versions and is intended for configuring a new installation of {EDAcontroller} 1.1 with {ControllerName} 4.4 or 4.5.
+
+== Infrastructure topology
+The following diagram outlines the infrastructure topology that Red{nbsp}Hat has tested with this deployment model that customers can use when self-managing {PlatformNameShort}:
+
+.Infrastructure topology diagram
+image::rpm-b-env-b.png[RPM mixed enterprise topology diagram]
+
+Each VM has been tested with the following component requirements: 16 GB RAM, 4 CPUs, 60 GB local disk, and 3000 IOPS.
+ +.Infrastructure topology +[options="header"] +|==== +| VM count | Purpose | {PlatformNameShort} version | Example VM group names +| 3 | {GatewayStart} with colocated Redis | 2.5 | `automationgateway` +| 2 | {ControllerNameStart} | 2.4 | `automationcontroller` +| 2 | {PrivateHubNameStart} | 2.4 | `automationhub` +| 3 | {EDAName} with colocated Redis | 2.5 | `automationedacontroller` +| 1 | {AutomationMeshStart} hop node | 2.4 | `execution_nodes` +| 2 | {AutomationMeshStart} execution node | 2.4 | `execution_nodes` +| 1 | Externally managed database service | N/A | N/A +| 1 | HAProxy load balancer in front of {Gateway} (externally managed) | N/A | N/A +|==== + +[NOTE] +==== +Six VMs are required for a Redis high availability (HA) compatible deployment. Redis can be colocated on any {PlatformNameShort} {PlatformVers} component VM except for {ControllerName}, execution nodes, or the PostgreSQL database. +==== + +== Tested system configurations + +Red{nbsp}Hat has tested the following configurations to install and run {PlatformName}: + +.Tested system configurations +[options="header"] +|==== +| Type | Description +| Subscription | Valid {PlatformName} subscription +| Operating system | {RHEL} 8.8 or later, or {RHEL} 9.2 or later +| CPU architecture | x86_64, AArch64 +| Ansible-core | Ansible-core version {CoreInstVers} or later +| Browser | A currently supported version of Mozilla Firefox or Google Chrome +| Database | {PostgresVers} +|==== + +== Network ports + +{PlatformName} uses several ports to communicate with its services. These ports must be open and available for incoming connections to the {PlatformName} server for the platform to work. Ensure that these ports are available and are not blocked by the server firewall. + +.Network ports and protocols +[options="header"] +|==== +| Port number | Protocol | Service | Source | Destination +| 80/443 | TCP | HTTP/HTTPS | {EDAName} | {HubNameStart} +| 80/443 | TCP | HTTP/HTTPS | {EDAName} | {ControllerNameStart} +| 80/443 | TCP | HTTP/HTTPS | {ControllerNameStart} | {HubNameStart} +| 80/443 | TCP | HTTP/HTTPS | HAProxy load balancer | {GatewayStart} +| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {ControllerNameStart} +| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {HubNameStart} +| 80/443 | TCP | HTTP/HTTPS | {GatewayStart} | {EDAName} +| 5432 | TCP | PostgreSQL | {EDAName} | External database +| 5432 | TCP | PostgreSQL | {GatewayStart} | External database +| 5432 | TCP | PostgreSQL | {HubNameStart} | External database +| 5432 | TCP | PostgreSQL | {ControllerNameStart} | External database +| 27199 | TCP | Receptor | {ControllerNameStart} | Hop node and execution node +| 27199 | TCP | Receptor | Hop node | Execution node +| 6379 | TCP | Redis | {EDAName} | Redis node +| 6379 | TCP | Redis | {GatewayStart} | Redis node +| 16379 | TCP | Redis | Redis node | Redis node +| 8443 | TCP | HTTPS | {GatewayStart} | {GatewayStart} +| 50051 | TCP | gRPC | {GatewayStart} | {GatewayStart} +|==== + +== Example mixed enterprise inventory file +Use the example inventory file to perform an installation for this topology: + +include::snippets/inventory-rpm-b-env-b.adoc[] diff --git a/downstream/modules/topologies/snippets b/downstream/modules/topologies/snippets new file mode 120000 index 0000000000..7bf6da9a51 --- /dev/null +++ b/downstream/modules/topologies/snippets @@ -0,0 +1 @@ +../../snippets \ No newline at end of file diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-aap-packages.adoc
b/downstream/modules/troubleshooting-aap/proc-troubleshoot-aap-packages.adoc index 4857536867..a2d21f4a5f 100644 --- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-aap-packages.adoc +++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-aap-packages.adoc @@ -3,4 +3,4 @@ You cannot locate certain packages that come bundled with the {PlatformNameShort} installer, or you are seeing a "Repositories disabled by configuration" message. -To resolve this issue, enable the repository by using the `subscription-manager` command in the command line. For more information about resolving this issue, see the _Troubleshooting_ section of link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/proc-attaching-subscriptions_planning[Attaching your {PlatformName} subscription] in the {PlatformName} Planning Guide. \ No newline at end of file +To resolve this issue, enable the repository by using the `subscription-manager` command in the command line. For more information about resolving this issue, see the _Troubleshooting_ section of link:{URLCentralAuth}/assembly-gateway-licensing#proc-attaching-subscriptions[Attaching your {PlatformName} subscription] in _{TitleCentralAuth}_. \ No newline at end of file diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-invalid-credentials.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-invalid-credentials.adoc index ee0bec48f5..163d1cf22b 100644 --- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-invalid-credentials.adoc +++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-invalid-credentials.adoc @@ -27,5 +27,6 @@ The default value is `-1`, which disables the maximum sessions allowed. This mea * For more information about installing and using the controller node CLI, see link:https://docs.ansible.com/automation-controller/latest/html/controllercli/index.html[AWX Command Line Interface] and link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#assembly-controller-awx-manage-utility[AWX manage utility]. -* For more information about session limits, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-session-limits[Session Limits] in the Automation Controller Administration Guide. +// Michelle - commenting out for now as this content doesn't appear to exist anymore in a published doc +// * For more information about session limits, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/automation_controller_administration_guide/controller-session-limits[Session Limits] in the Automation Controller Administration Guide. diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-pending.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-pending.adoc index a949e9a010..e393e55b46 100644 --- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-pending.adoc +++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-pending.adoc @@ -3,7 +3,7 @@ After launching jobs in {ControllerName}, the jobs stay in a pending state and do not start. -There are a few reasons jobs can become stuck in a pending state. 
For more information about troubleshooting this issue, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_administration_guide/index#controller-playbook-pending[Playbook stays in pending] in the Automation Controller Administration Guide. +There are a few reasons jobs can become stuck in a pending state. For more information about troubleshooting this issue, see link:{URLControllerAdminGuide}/controller-troubleshooting#controller-playbook-pending[Playbook stays in pending] in _{TitleControllerAdminGuide}_. *Cancel all pending jobs* diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-permissions.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-permissions.adoc index 9cf73e5c63..4583466ab2 100644 --- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-permissions.adoc +++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-permissions.adoc @@ -16,5 +16,4 @@ This issue happens when your {PrivateHubName} is protected with a password or to [role="_additional-resources"] .Additional resources -* For information about creating new credentials in {ControllerName}, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/automation_controller_user_guide/index#controller-getting-started-create-credential[Creating new credentials] in the Automation Controller User Guide. - +* For information about creating new credentials in {ControllerName}, see link:{URLControllerUserGuide}/controller-credentials#controller-create-credential[Creating new credentials] in _{TitleControllerUserGuide}_. diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-resolve-module.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-resolve-module.adoc index 95b8ed7d30..e5be9fd4a5 100644 --- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-resolve-module.adoc +++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-resolve-module.adoc @@ -5,7 +5,7 @@ Jobs are failing with the error message “ERROR! couldn't resolve module/action This error can happen when the collection associated with the module is missing from the {ExecEnvShort}. -The recommended resolution is to create a custom {ExecEnvShort} and add the required collections inside of that {ExecEnvShort}. For more information about creating an {ExecEnvShort}, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/creating_and_consuming_execution_environments/assembly-using-builder[Using {Builder}] in Creating and Consuming Execution Environments. +The recommended resolution is to create a custom {ExecEnvShort} and add the required collections inside that {ExecEnvShort}, as shown in the example that follows. For more information about creating an {ExecEnvShort}, see link:{URLBuilder}/assembly-using-builder[Using {Builder}] in _{TitleBuilder}_.
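+
+For illustration only, the following is a minimal `execution-environment.yml` sketch for {Builder}. The base image and the `community.general` collection are placeholder values, not prescribed ones; substitute the image your organization uses and the collections that provide the unresolved modules:
+
+[source,yaml]
+----
+version: 3
+images:
+  base_image:
+    # Placeholder base image; use the execution environment base image available to you.
+    name: registry.redhat.io/ansible-automation-platform-25/ee-minimal-rhel8:latest
+dependencies:
+  galaxy:
+    collections:
+      # Placeholder collection; add the collection that contains the unresolved module or action.
+      - community.general
+----
+
+You can then build the image with `ansible-builder build --tag my_custom_ee` (the tag is an example), push it to your container registry, and select it as the {ExecEnvShort} for the job template.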
Alternatively, you can complete the following steps: diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-must-gather.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-must-gather.adoc index 71be3dcfcb..f9989aed47 100644 --- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-must-gather.adoc +++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-must-gather.adoc @@ -27,7 +27,7 @@ oc login __ + [subs="+quotes"] ---- -oc adm must-gather --image=registry.redhat.io/ansible-automation-platform-24/aap-must-gather-rhel8 --dest-dir __ +oc adm must-gather --image=registry.redhat.io/ansible-automation-platform-25/aap-must-gather-rhel8 --dest-dir __ ---- + ** `--image` specifies the image that gathers data @@ -37,7 +37,7 @@ oc adm must-gather --image=registry.redhat.io/ansible-automation-platform-24/aap + [subs="+quotes"] ---- -oc adm must-gather --image=registry.redhat.io/ansible-automation-platform-24/aap-must-gather-rhel8 --dest-dir __ – /usr/bin/ns-gather __ +oc adm must-gather --image=registry.redhat.io/ansible-automation-platform-25/aap-must-gather-rhel8 --dest-dir __ -- /usr/bin/ns-gather __ ---- + ** `– /usr/bin/ns-gather` limits the `must-gather` data collection to a specified namespace diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-upgrade-issues.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-upgrade-issues.adoc new file mode 100644 index 0000000000..ad33828ab3 --- /dev/null +++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-upgrade-issues.adoc @@ -0,0 +1,18 @@ +[id="troubleshoot-upgrade-issues"] += Issue - When upgrading from {PlatformNameShort} 2.4 to {PlatformVers}, connections to the {ControllerName} API fail if the {ControllerName} is behind a load balancer + +When upgrading from {PlatformNameShort} 2.4 to {PlatformVers}, the upgrade completes; however, connections to the {Gateway} URL fail in the {Gateway} UI if the {ControllerName} is behind a load balancer. The following error message is displayed: + +`Error connecting to Controller API` + +To resolve this issue, perform the following tasks for all controller hosts: + +. For each controller host, add the {Gateway} URL as a trusted source in the `CSRF_TRUSTED_ORIGINS` setting in the *settings.py* file. ++ +For example, if you configured the {Gateway} URL as `https://www.example.com`, you must also add that URL to the *settings.py* file, as shown in the following example: ++ +---- +CSRF_TRUSTED_ORIGINS = ['https://appX.example.com:8443','https://www.example.com'] +---- + +. Restart each controller host by using the `automation-controller-service restart` command so that the URL changes take effect. For the procedure, see link:{URLControllerAdminGuide}/controller-start-stop-controller[Start, stop, and restart {ControllerName}] in _{TitleControllerAdminGuide}_. \ No newline at end of file diff --git a/downstream/snippets/container-upgrades.adoc b/downstream/snippets/container-upgrades.adoc new file mode 100644 index 0000000000..f32a83504c --- /dev/null +++ b/downstream/snippets/container-upgrades.adoc @@ -0,0 +1 @@ +Upgrades from containerized {PlatformNameShort} 2.4 (Technology Preview) to containerized {PlatformNameShort} 2.5 are not supported at this time.
\ No newline at end of file diff --git a/downstream/snippets/deprecated-features.adoc b/downstream/snippets/deprecated-features.adoc index 4fa07cbb4b..752d75b62d 100644 --- a/downstream/snippets/deprecated-features.adoc +++ b/downstream/snippets/deprecated-features.adoc @@ -1 +1 @@ -Deprecated functionality is still included in {PlatformNameShort} and continues to be supported. However, the functionality will be removed in a future release of {PlatformNameShort} and is not recommended for new deployments. \ No newline at end of file +Deprecated functionality is still included in {PlatformNameShort} and continues to be supported during this version's support cycle. However, the functionality will be removed in a future release of {PlatformNameShort} and is not recommended for new deployments. \ No newline at end of file diff --git a/downstream/snippets/docker-devcontainer.json b/downstream/snippets/docker-devcontainer.json new file mode 100644 index 0000000000..f79edca4bb --- /dev/null +++ b/downstream/snippets/docker-devcontainer.json @@ -0,0 +1,20 @@ +---- +{ + "name": "ansible-dev-container-docker", + "image": "registry.redhat.io/ansible-automation-platform-25/ansible-dev-tools-rhel8:latest", + "containerUser": "root", + "runArgs": [ + "--privileged", + "--device", + "/dev/fuse", + "--hostname=ansible-dev-container" + ], + "updateRemoteUserUID": true, + "customizations": { + "vscode": { + "extensions": ["redhat.ansible"] + } + } +} +---- +// From https://github.com/ansible/ansible-dev-tools/blob/main/.devcontainer/docker/devcontainer.json diff --git a/downstream/snippets/inventory-cont-a-env-a.adoc b/downstream/snippets/inventory-cont-a-env-a.adoc new file mode 100644 index 0000000000..de41b4f506 --- /dev/null +++ b/downstream/snippets/inventory-cont-a-env-a.adoc @@ -0,0 +1,83 @@ +//Inventory file for CONT A ENV A topology + +[source,yaml,subs="+attributes"] +---- +# This is the {PlatformNameShort} installer inventory file intended for the container growth deployment topology. +# This inventory file expects to be run from the host where {PlatformNameShort} will be installed. +# Consult the {PlatformNameShort} product documentation about this topology's tested hardware configuration. 
+# {URLTopologies}/container-topologies +# +# Consult the docs if you are unsure what to add +# For all optional variables consult the included README.md +# or the {PlatformNameShort} documentation: +# {URLContainerizedInstall} + +# This section is for your {Gateway} hosts +# ----------------------------------------------------- +[automationgateway] +aap.example.org + +# This section is for your {ControllerName} hosts +# ------------------------------------------------- +[automationcontroller] +aap.example.org + +# This section is for your {HubName} hosts +# ----------------------------------------------------- +[automationhub] +aap.example.org + +# This section is for your {EDAcontroller} hosts +# ----------------------------------------------------- +[automationeda] +aap.example.org + +# This section is for the {PlatformNameShort} database +# -------------------------------------- +[database] +aap.example.org + +[all:vars] +# Ansible +ansible_connection=local + +# Common variables +# {URLContainerizedInstall}/appendix-inventory-files-vars#ref-general-inventory-variables +# ----------------------------------------------------- +postgresql_admin_username=postgres +postgresql_admin_password= + +registry_username= +registry_password= + +redis_mode=standalone + +# {GatewayStart} +# {URLContainerizedInstall}/appendix-inventory-files-vars#ref-gateway-variables +# ----------------------------------------------------- +gateway_admin_password= +gateway_pg_host=aap.example.org +gateway_pg_password= + +# {ControllerNameStart} +# {URLContainerizedInstall}/appendix-inventory-files-vars#ref-controller-variables +# ----------------------------------------------------- +controller_admin_password= +controller_pg_host=aap.example.org +controller_pg_password= + +# {HubNameStart} +# {URLContainerizedInstall}/appendix-inventory-files-vars#ref-hub-variables +# ----------------------------------------------------- +hub_admin_password= +hub_pg_host=aap.example.org +hub_pg_password= +hub_seed_collections=false + +# {EDAcontroller} +# {URLContainerizedInstall}/appendix-inventory-files-vars#event-driven-ansible-controller +# ----------------------------------------------------- +eda_admin_password= +eda_pg_host=aap.example.org +eda_pg_password= +---- \ No newline at end of file diff --git a/downstream/snippets/inventory-cont-b-env-a.adoc b/downstream/snippets/inventory-cont-b-env-a.adoc new file mode 100644 index 0000000000..23b06245a2 --- /dev/null +++ b/downstream/snippets/inventory-cont-b-env-a.adoc @@ -0,0 +1,95 @@ +//Inventory file for CONT B ENV A topology + +[source,yaml,subs="+attributes"] +---- +# This is the {PlatformNameShort} enterprise installer inventory file +# Consult the docs if you are unsure what to add +# For all optional variables consult the included README.md +# or the Red Hat documentation: +# {URLContainerizedInstall} + +# This section is for your {Gateway} hosts +# ----------------------------------------------------- +[automationgateway] +gateway1.example.org +gateway2.example.org + +# This section is for your {ControllerName} hosts +# ----------------------------------------------------- +[automationcontroller] +controller1.example.org +controller2.example.org + +# This section is for your {PlatformNameShort} execution hosts +# ----------------------------------------------------- +[execution_nodes] +hop1.example.org receptor_type='hop' +exec1.example.org +exec2.example.org + +# This section is for your {HubName} hosts +# ----------------------------------------------------- 
+[automationhub] +hub1.example.org +hub2.example.org + +# This section is for your {EDAcontroller} hosts +# ----------------------------------------------------- +[automationeda] +eda1.example.org +eda2.example.org + +[redis] +gateway1.example.org +gateway2.example.org +hub1.example.org +hub2.example.org +eda1.example.org +eda2.example.org + +[all:vars] + +# Common variables +# {URLContainerizedInstall}/appendix-inventory-files-vars#ref-general-inventory-variables +# ----------------------------------------------------- +postgresql_admin_username= +postgresql_admin_password= +registry_username= +registry_password= + +# {GatewayStart} +# {URLContainerizedInstall}/appendix-inventory-files-vars#ref-gateway-variables +# ----------------------------------------------------- +gateway_admin_password= +gateway_pg_host=externaldb.example.org +gateway_pg_database= +gateway_pg_username= +gateway_pg_password= + +# {ControllerNameStart} +# {URLContainerizedInstall}/appendix-inventory-files-vars#ref-controller-variables +# ----------------------------------------------------- +controller_admin_password= +controller_pg_host=externaldb.example.org +controller_pg_database= +controller_pg_username= +controller_pg_password= + +# {HubNameStart} +# {URLContainerizedInstall}/appendix-inventory-files-vars#ref-hub-variables +# ----------------------------------------------------- +hub_admin_password= +hub_pg_host=externaldb.example.org +hub_pg_database= +hub_pg_username= +hub_pg_password= + +# {EDAcontroller} +# {URLContainerizedInstall}/appendix-inventory-files-vars#event-driven-ansible-controller +# ----------------------------------------------------- +eda_admin_password= +eda_pg_host=externaldb.example.org +eda_pg_database= +eda_pg_username= +eda_pg_password= +---- \ No newline at end of file diff --git a/downstream/snippets/inventory-rpm-a-env-a.adoc b/downstream/snippets/inventory-rpm-a-env-a.adoc new file mode 100644 index 0000000000..3111f2f573 --- /dev/null +++ b/downstream/snippets/inventory-rpm-a-env-a.adoc @@ -0,0 +1,84 @@ +//Inventory file for RPM A ENV A topology + +[source,yaml,subs="+attributes"] +---- +# This is the {PlatformNameShort} installer inventory file intended for the RPM growth deployment topology. +# Consult the {PlatformNameShort} product documentation about this topology's tested hardware configuration. 
+# {URLTopologies}/rpm-topologies +# +# Consult the docs if you are unsure what to add +# For all optional variables consult the {PlatformNameShort} documentation: +# {URLInstallationGuide} + + +# This section is for your {Gateway} hosts +# ----------------------------------------------------- +[automationgateway] +gateway.example.org + +# This section is for your {ControllerName} hosts +# ----------------------------------------------------- +[automationcontroller] +controller.example.org + +[automationcontroller:vars] +peers=execution_nodes + +# This section is for your {PlatformNameShort} execution hosts +# ----------------------------------------------------- +[execution_nodes] +exec.example.org + +# This section is for your {HubName} hosts +# ----------------------------------------------------- +[automationhub] +hub.example.org + +# This section is for your {EDAcontroller} hosts +# ----------------------------------------------------- +[automationedacontroller] +eda.example.org + +# This section is for the {PlatformNameShort} database +# ----------------------------------------------------- +[database] +db.example.org + +[all:vars] + +# Common variables +# {URLInstallationGuide}/appendix-inventory-files-vars#ref-general-inventory-variables +# ----------------------------------------------------- +registry_username= +registry_password= + +redis_mode=standalone + +# {GatewayStart} +# {URLInstallationGuide}/appendix-inventory-files-vars#ref-gateway-variables +# ----------------------------------------------------- +automationgateway_admin_password= +automationgateway_pg_host=db.example.org +automationgateway_pg_password= + +# {ControllerNameStart} +# {URLInstallationGuide}/appendix-inventory-files-vars#ref-controller-variables +# ----------------------------------------------------- +admin_password= +pg_host=db.example.org +pg_password= + +# {HubNameStart} +# {URLInstallationGuide}/appendix-inventory-files-vars#ref-hub-variables +# ----------------------------------------------------- +automationhub_admin_password= +automationhub_pg_host=db.example.org +automationhub_pg_password= + +# {EDAcontroller} +# {URLInstallationGuide}/appendix-inventory-files-vars#event-driven-ansible-controller +# ----------------------------------------------------- +automationedacontroller_admin_password= +automationedacontroller_pg_host=db.example.org +automationedacontroller_pg_password= +---- \ No newline at end of file diff --git a/downstream/snippets/inventory-rpm-a-env-b.adoc b/downstream/snippets/inventory-rpm-a-env-b.adoc new file mode 100644 index 0000000000..59e43c399c --- /dev/null +++ b/downstream/snippets/inventory-rpm-a-env-b.adoc @@ -0,0 +1,46 @@ +//Inventory file for RPM A ENV B topology + +[source,yaml,subs="+attributes"] +---- +# This is the {PlatformNameShort} installer inventory file intended for the mixed RPM growth deployment topology. +# Consult the {PlatformNameShort} product documentation about this topology's tested hardware configuration. 
+# {URLTopologies}/rpm-topologies +# +# Consult the docs if you are unsure what to add +# For all optional variables consult the Red Hat documentation: +# {URLInstallationGuide} + +# This section is for your {Gateway} hosts +# ----------------------------------------------------- +[automationgateway] +gateway.example.org + +# This section is for your {EDAcontroller} hosts +# ----------------------------------------------------- +[automationedacontroller] +eda.example.org + +[all:vars] + +# Common variables +# {URLInstallationGuide}/appendix-inventory-files-vars#ref-general-inventory-variables +# ----------------------------------------------------- +registry_username= +registry_password= + +redis_mode=standalone + +# {GatewayStart} +# {URLInstallationGuide}/appendix-inventory-files-vars#ref-gateway-variables +# ----------------------------------------------------- +automationgateway_admin_password= +automationgateway_pg_host=db.example.org +automationgateway_pg_password= + +# {EDAcontroller} +# {URLInstallationGuide}/appendix-inventory-files-vars#event-driven-ansible-controller +# ----------------------------------------------------- +automationedacontroller_admin_password= +automationedacontroller_pg_host=db.example.org +automationedacontroller_pg_password= +---- \ No newline at end of file diff --git a/downstream/snippets/inventory-rpm-b-env-a.adoc b/downstream/snippets/inventory-rpm-b-env-a.adoc new file mode 100644 index 0000000000..a7d488120d --- /dev/null +++ b/downstream/snippets/inventory-rpm-b-env-a.adoc @@ -0,0 +1,94 @@ +//Inventory file for RPM B ENV A topology + +[source,yaml,subs="+attributes"] +---- +# This is the {PlatformNameShort} enterprise installer inventory file +# Consult the docs if you are unsure what to add +# For all optional variables consult the Red Hat documentation: +# {URLInstallationGuide} + +# This section is for your {Gateway} hosts +# ----------------------------------------------------- +[automationgateway] +gateway1.example.org +gateway2.example.org + +# This section is for your {ControllerName} hosts +# ----------------------------------------------------- +[automationcontroller] +controller1.example.org +controller2.example.org + +[automationcontroller:vars] +peers=execution_nodes + +# This section is for your {PlatformNameShort} execution hosts +# ----------------------------------------------------- +[execution_nodes] +hop1.example.org node_type='hop' +exec1.example.org +exec2.example.org + +# This section is for your {HubName} hosts +# ----------------------------------------------------- +[automationhub] +hub1.example.org +hub2.example.org + +# This section is for your {EDAcontroller} hosts +# ----------------------------------------------------- +[automationedacontroller] +eda1.example.org +eda2.example.org + +[redis] +gateway1.example.org +gateway2.example.org +hub1.example.org +hub2.example.org +eda1.example.org +eda2.example.org + +[all:vars] +# Common variables +# {URLInstallationGuide}/appendix-inventory-files-vars#ref-general-inventory-variables +# ----------------------------------------------------- +registry_username= +registry_password= + +# {GatewayStart} +# {URLInstallationGuide}/appendix-inventory-files-vars#ref-gateway-variables +# ----------------------------------------------------- +automationgateway_admin_password= +automationgateway_pg_host= +automationgateway_pg_database= +automationgateway_pg_username= +automationgateway_pg_password= + +# {ControllerNameStart} +# 
{URLInstallationGuide}/appendix-inventory-files-vars#ref-controller-variables +# ----------------------------------------------------- +admin_password= +pg_host= +pg_database= +pg_username= +pg_password= + +# {HubNameStart} +# {URLInstallationGuide}/appendix-inventory-files-vars#ref-hub-variables +# ----------------------------------------------------- +automationhub_admin_password= +automationhub_pg_host= +automationhub_pg_database= +automationhub_pg_username= +automationhub_pg_password= + +# {EDAcontroller} +# {URLInstallationGuide}/appendix-inventory-files-vars#event-driven-ansible-controller +# ----------------------------------------------------- +automationedacontroller_admin_password= +automationedacontroller_pg_host= +automationedacontroller_pg_database= +automationedacontroller_pg_username= +automationedacontroller_pg_password= +---- \ No newline at end of file diff --git a/downstream/snippets/inventory-rpm-b-env-b.adoc b/downstream/snippets/inventory-rpm-b-env-b.adoc new file mode 100644 index 0000000000..39a6c1a17c --- /dev/null +++ b/downstream/snippets/inventory-rpm-b-env-b.adoc @@ -0,0 +1,56 @@ +//Inventory file for RPM B ENV B topology + +[source,yaml,subs="+attributes"] +---- +# This is the {PlatformNameShort} mixed enterprise installer inventory file +# Consult the docs if you are unsure what to add +# For all optional variables consult the Red Hat documentation: +# {URLInstallationGuide} + +# This section is for your {Gateway} hosts +# ----------------------------------------------------- +[automationgateway] +gateway1.example.org +gateway2.example.org +gateway3.example.org + +# This section is for your {EDAcontroller} hosts +# ----------------------------------------------------- +[automationedacontroller] +eda1.example.org +eda2.example.org +eda3.example.org + +[redis] +gateway1.example.org +gateway2.example.org +gateway3.example.org +eda1.example.org +eda2.example.org +eda3.example.org + +[all:vars] +# Common variables +# {URLInstallationGuide}/appendix-inventory-files-vars#ref-general-inventory-variables +# ----------------------------------------------------- +registry_username= +registry_password= + +# {GatewayStart} +# {URLInstallationGuide}/appendix-inventory-files-vars#ref-gateway-variables +# ----------------------------------------------------- +automationgateway_admin_password= +automationgateway_pg_host= +automationgateway_pg_database= +automationgateway_pg_username= +automationgateway_pg_password= + +# {EDAcontroller} +# {URLInstallationGuide}/appendix-inventory-files-vars#event-driven-ansible-controller +# ----------------------------------------------------- +automationedacontroller_admin_password= +automationedacontroller_pg_host= +automationedacontroller_pg_database= +automationedacontroller_pg_username= +automationedacontroller_pg_password= +---- \ No newline at end of file diff --git a/downstream/snippets/podman-devcontainer.json b/downstream/snippets/podman-devcontainer.json new file mode 100644 index 0000000000..749dbfd616 --- /dev/null +++ b/downstream/snippets/podman-devcontainer.json @@ -0,0 +1,31 @@ +---- +{ + "name": "ansible-dev-container-podman", + "image": "registry.redhat.io/ansible-automation-platform-25/ansible-dev-tools-rhel8:latest", + "containerUser": "root", + "runArgs": [ + "--cap-add=CAP_MKNOD", + "--cap-add=NET_ADMIN", + "--cap-add=SYS_ADMIN", + "--cap-add=SYS_RESOURCE", + "--device", + "/dev/fuse", + "--security-opt", + "seccomp=unconfined", + "--security-opt", + "label=disable", + "--security-opt", + "apparmor=unconfined", + 
"--security-opt", + "unmask=/sys/fs/cgroup", + "--userns=host", + "--hostname=ansible-dev-container" + ], + "customizations": { + "vscode": { + "extensions": ["redhat.ansible"] + } + } +} +---- +// From https://github.com/ansible/ansible-dev-tools/blob/main/.devcontainer/podman/devcontainer.json diff --git a/downstream/snippets/redis-colocation-containerized.adoc b/downstream/snippets/redis-colocation-containerized.adoc new file mode 100644 index 0000000000..d4de91f5d0 --- /dev/null +++ b/downstream/snippets/redis-colocation-containerized.adoc @@ -0,0 +1,2 @@ +//This snippet details the colocation configuration for a containerized install of AAP - note that it can be colocated with controller. +* 6 VMs are required for a Redis high availability (HA) compatible deployment. When installing {PlatformNameShort} with the containerized installer, Redis can be colocated on any {PlatformNameShort} component VMs of your choice except for execution nodes or the PostgreSQL database. They might also be assigned VMs specifically for Redis use. diff --git a/downstream/snippets/snip-gateway-component-description.adoc b/downstream/snippets/snip-gateway-component-description.adoc new file mode 100644 index 0000000000..5f55154a32 --- /dev/null +++ b/downstream/snippets/snip-gateway-component-description.adoc @@ -0,0 +1 @@ +{GatewayStart} is the service that handles authentication and authorization for the {PlatformNameShort}. It provides a single entry into the {PlatformNameShort} and serves the platform user interface so you can authenticate and access all of the {PlatformNameShort} services from a single location. diff --git a/downstream/snippets/snip-gw-authentication-additional-auth-fields.adoc b/downstream/snippets/snip-gw-authentication-additional-auth-fields.adoc new file mode 100644 index 0000000000..72af8900fc --- /dev/null +++ b/downstream/snippets/snip-gw-authentication-additional-auth-fields.adoc @@ -0,0 +1,6 @@ +. Optional: Enter any *Additional Authenticator Fields* that this authenticator can take. These fields are not validated and are passed directly back to the authenticator. ++ +[NOTE] +==== +Values defined in this field override the dedicated fields provided in the UI. +==== \ No newline at end of file diff --git a/downstream/snippets/snip-gw-authentication-common-checkboxes.adoc b/downstream/snippets/snip-gw-authentication-common-checkboxes.adoc new file mode 100644 index 0000000000..d1c8921e0f --- /dev/null +++ b/downstream/snippets/snip-gw-authentication-common-checkboxes.adoc @@ -0,0 +1,3 @@ +. To automatically create organizations, users, and teams upon successful login, select *Create objects*. +. To enable this authentication method upon creation, select *Enabled*. +. To remove a user for any groups they were previously added to when they authenticate from this source, select *Remove users*. diff --git a/downstream/snippets/snip-gw-authentication-next-steps.adoc b/downstream/snippets/snip-gw-authentication-next-steps.adoc new file mode 100644 index 0000000000..6805d9bf16 --- /dev/null +++ b/downstream/snippets/snip-gw-authentication-next-steps.adoc @@ -0,0 +1 @@ +To control which users are allowed into the {PlatformNameShort} server, and placed into {PlatformNameShort} organizations or teams based on their attributes (like username and email address) or to what groups they belong, continue to link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/access_management_and_authentication/index#gw-mapping[Mapping]. 
\ No newline at end of file diff --git a/downstream/snippets/snip-gw-authentication-verification.adoc b/downstream/snippets/snip-gw-authentication-verification.adoc new file mode 100644 index 0000000000..a19b861830 --- /dev/null +++ b/downstream/snippets/snip-gw-authentication-verification.adoc @@ -0,0 +1,3 @@ +.Verification + +To verify that the authentication is configured correctly, log out of {PlatformNameShort} and check that the login screen displays the logo of your chosen authentication method, enabling you to log in with those credentials. \ No newline at end of file diff --git a/downstream/snippets/snip-gw-mapping-next-steps.adoc b/downstream/snippets/snip-gw-mapping-next-steps.adoc new file mode 100644 index 0000000000..bb256d4888 --- /dev/null +++ b/downstream/snippets/snip-gw-mapping-next-steps.adoc @@ -0,0 +1,9 @@ +. You can manage the order of the authentication mappings by dragging and dropping a mapping up or down in the list. ++ +[NOTE] +==== +The mapping precedence is determined by the order in which the mappings are listed. +==== ++ +. Click btn:[Next] to review and verify the mapping configurations. +. Click btn:[Finish]. diff --git a/downstream/snippets/snip-gw-roles-note-multiple-components.adoc b/downstream/snippets/snip-gw-roles-note-multiple-components.adoc new file mode 100644 index 0000000000..81796a1ec4 --- /dev/null +++ b/downstream/snippets/snip-gw-roles-note-multiple-components.adoc @@ -0,0 +1,4 @@ +[NOTE] +==== +If you have multiple {PlatformNameShort} components installed, you can see selections for the roles associated with each component in the *Roles* menu bar. For example, Automation Execution for {ControllerName} roles and Automation Decisions for {EDAName} roles. +==== \ No newline at end of file diff --git a/downstream/snippets/technology-preview.adoc b/downstream/snippets/technology-preview.adoc index 4f60f2f7a7..62fbd6aaf7 100644 --- a/downstream/snippets/technology-preview.adoc +++ b/downstream/snippets/technology-preview.adoc @@ -1,3 +1,5 @@ -Technology Preview features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. +Technology Preview features are not supported with Red{nbsp}Hat production service level agreements (SLAs) and might not be functionally complete. +Red{nbsp}Hat does not recommend using them in production. +These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. -For more information about the support scope of Red Hat Technology Preview features, see link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Support Scope]. \ No newline at end of file +For more information about the support scope of Red{nbsp}Hat Technology Preview features, see link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Support Scope].
diff --git a/downstream/titles/aap-containerized-install/docinfo.xml b/downstream/titles/aap-containerized-install/docinfo.xml index 3fff7e069d..47f1005992 100644 --- a/downstream/titles/aap-containerized-install/docinfo.xml +++ b/downstream/titles/aap-containerized-install/docinfo.xml @@ -1,9 +1,9 @@ -Containerized Ansible Automation Platform installation guide +Containerized installation Red Hat Ansible Automation Platform 2.5 -Containerized Ansible Automation Platform Installation Guide +Install the containerized version of Ansible Automation Platform -Containerized Ansible Automation Platform Installation Guide +This guide helps you to understand the installation requirements and processes behind our containerized version of Ansible Automation Platform. Red Hat Customer Content Services diff --git a/downstream/titles/aap-containerized-install/master.adoc b/downstream/titles/aap-containerized-install/master.adoc index 1a5cf65b27..982f5a4b93 100644 --- a/downstream/titles/aap-containerized-install/master.adoc +++ b/downstream/titles/aap-containerized-install/master.adoc @@ -1,13 +1,20 @@ :imagesdir: images :toclevels: 4 :experimental: - +:container-install: include::attributes/attributes.adoc[] // Book Title -= Containerized Ansible Automation Platform installation guide += Containerized installation include::{Boilerplate}[] include::platform/assembly-aap-containerized-installation.adoc[leveloffset=+1] +include::platform/assembly-horizontal-scaling.adoc[leveloffset=+1] + +[appendix] +include::platform/assembly-appendix-troubleshoot-containerized-aap.adoc[leveloffset=1] + +[appendix] +include::platform/assembly-appendix-inventory-file-vars.adoc[leveloffset=1] \ No newline at end of file diff --git a/downstream/titles/aap-hardening/docinfo.xml b/downstream/titles/aap-hardening/docinfo.xml index e854befb03..a5430ad33c 100644 --- a/downstream/titles/aap-hardening/docinfo.xml +++ b/downstream/titles/aap-hardening/docinfo.xml @@ -1,7 +1,7 @@ -Red Hat Ansible Automation Platform hardening guide +Hardening and compliance Red Hat Ansible Automation Platform 2.5 -Install, configure, and maintain Ansible Automation Platform running on Red Hat Enterprise Linux in a secure manner. +Install, configure, and maintain Ansible Automation Platform running on Red Hat Enterprise Linux in a secure manner This guide provides recommended practices for various processes needed to install, configure, and maintain {PlatformNameShort} on Red Hat Enterprise Linux in a secure manner. diff --git a/downstream/titles/aap-hardening/master.adoc b/downstream/titles/aap-hardening/master.adoc index 9c7a628e47..611624c523 100644 --- a/downstream/titles/aap-hardening/master.adoc +++ b/downstream/titles/aap-hardening/master.adoc @@ -3,17 +3,18 @@ :toclevels: 1 :experimental: +:hardening: include::attributes/attributes.adoc[] // Book Title -= Red Hat Ansible Automation Platform hardening guide += Hardening and compliance This guide provides recommended practices for various processes needed to install, configure, and maintain {PlatformNameShort} on Red Hat Enterprise Linux in a secure manner. 
include::{Boilerplate}[] include::aap-hardening/assembly-intro-to-aap-hardening.adoc[leveloffset=+1] include::aap-hardening/assembly-hardening-aap.adoc[leveloffset=+1] -// include::aap-hardening/assembly-aap-compliance.adoc[leveloffset=+1] -// include::aap-hardening/assembly-aap-security-enabling.adoc[leveloffset=+1] +//include::aap-hardening/assembly-aap-compliance.adoc[leveloffset=+1] +include::aap-hardening/assembly-aap-security-use-cases.adoc[leveloffset=+1] diff --git a/downstream/titles/aap-hardening/platform b/downstream/titles/aap-hardening/platform new file mode 120000 index 0000000000..9a3ca429a1 --- /dev/null +++ b/downstream/titles/aap-hardening/platform @@ -0,0 +1 @@ +../../modules/platform/ \ No newline at end of file diff --git a/downstream/titles/aap-installation-guide/docinfo.xml b/downstream/titles/aap-installation-guide/docinfo.xml index 4cc3491516..1153b284cc 100644 --- a/downstream/titles/aap-installation-guide/docinfo.xml +++ b/downstream/titles/aap-installation-guide/docinfo.xml @@ -1,7 +1,7 @@ -Red Hat Ansible Automation Platform installation guide +RPM installation Red Hat Ansible Automation Platform 2.5 -Install Ansible Automation Platform +Install the RPM version of Ansible Automation Platform This guide shows you how to install Red Hat Ansible Automation Platform based on supported installation scenarios. diff --git a/downstream/titles/aap-installation-guide/master.adoc b/downstream/titles/aap-installation-guide/master.adoc index c118a659c2..ec060bc790 100644 --- a/downstream/titles/aap-installation-guide/master.adoc +++ b/downstream/titles/aap-installation-guide/master.adoc @@ -3,11 +3,12 @@ :toclevels: 1 :experimental: +:aap-install: include::attributes/attributes.adoc[] // Book Title -= Red Hat Ansible Automation Platform installation guide += RPM installation Thank you for your interest in {PlatformName}. {PlatformNameShort} is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments. 
@@ -21,6 +22,7 @@ include::platform/assembly-platform-install-overview.adoc[leveloffset=+1] include::platform/assembly-system-requirements.adoc[leveloffset=+1] include::platform/assembly-platform-install-scenario.adoc[leveloffset=+1] //[dcdacosta]Removing this assembly because modules are now included in assembly-platform-install-scenario] include::platform/assembly-deploy-high-availability-hub.adoc[leveloffset=+1] +include::platform/assembly-horizontal-scaling.adoc[leveloffset=+1] include::platform/assembly-disconnected-installation.adoc[leveloffset=+1] //12/2/22 [dcd: moved following assemblies to new operations guide] //include::platform/assembly-configuring-proxy-support.adoc[leveloffset=+1] diff --git a/downstream/titles/aap-operations-guide/docinfo.xml b/downstream/titles/aap-operations-guide/docinfo.xml index d018690328..5ff53ae44c 100644 --- a/downstream/titles/aap-operations-guide/docinfo.xml +++ b/downstream/titles/aap-operations-guide/docinfo.xml @@ -1,4 +1,4 @@ -Red Hat Ansible Automation Platform operations guide +Operating Ansible Automation Platform Red Hat Ansible Automation Platform 2.5 Post installation configurations to ensure a smooth deployment of Ansible Automation Platform installation diff --git a/downstream/titles/aap-operations-guide/master.adoc b/downstream/titles/aap-operations-guide/master.adoc index 74ee76e4cf..cd4465544a 100644 --- a/downstream/titles/aap-operations-guide/master.adoc +++ b/downstream/titles/aap-operations-guide/master.adoc @@ -8,13 +8,13 @@ include::attributes/attributes.adoc[] // Book Title -= Red Hat Ansible Automation Platform operations guide += Operating Ansible Automation Platform After installing Red Hat Ansible Automation Platform, your system might need extra configuration to ensure your deployment runs smoothly. This guide provides procedures for configuration tasks that you can perform after installing {PlatformName}. include::{Boilerplate}[] -include::platform/assembly-aap-activate.adoc[leveloffset=+1] -include::platform/assembly-aap-manifest-files.adoc[leveloffset=+1] +// ddacosta - removed to avoid duplication with access management guide include::platform/assembly-aap-activate.adoc[leveloffset=+1] +// emurtoug removed this assembly to avoid duplication within Access management and authentication include::platform/assembly-aap-manifest-files.adoc[leveloffset=+1] //ifowler assembly transferred from installation guide as part of AAP-18700 include::platform/assembly-platform-whats-next.adoc[leveloffset=+1] include::platform/assembly-configuring-proxy-support.adoc[leveloffset=+1] diff --git a/downstream/titles/aap-operator-backup/docinfo.xml b/downstream/titles/aap-operator-backup/docinfo.xml index 94391e3c32..8b76b1f66a 100644 --- a/downstream/titles/aap-operator-backup/docinfo.xml +++ b/downstream/titles/aap-operator-backup/docinfo.xml @@ -1,4 +1,4 @@ -Red Hat Ansible Automation Platform operator backup and recovery guide +Backup and recovery for operator environments Red Hat Ansible Automation Platform 2.5 diff --git a/downstream/titles/aap-operator-backup/master.adoc b/downstream/titles/aap-operator-backup/master.adoc index 8604ad7c07..b259c2d0ad 100644 --- a/downstream/titles/aap-operator-backup/master.adoc +++ b/downstream/titles/aap-operator-backup/master.adoc @@ -8,7 +8,7 @@ include::attributes/attributes.adoc[] // Book Title -= Red Hat Ansible Automation Platform operator backup and recovery guide += Backup and recovery for operator environments Thank you for your interest in {PlatformName}. 
{PlatformNameShort} is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments. diff --git a/downstream/titles/aap-operator-installation/docinfo.xml b/downstream/titles/aap-operator-installation/docinfo.xml index 8cd75da97f..2b16dbedcf 100644 --- a/downstream/titles/aap-operator-installation/docinfo.xml +++ b/downstream/titles/aap-operator-installation/docinfo.xml @@ -1,4 +1,4 @@ -Deploying the Red Hat Ansible Automation Platform operator on OpenShift Container Platform +Installing on OpenShift Container Platform Red Hat Ansible Automation Platform 2.5 Install and configure Ansible Automation Platform operator on OpenShift Container Platform diff --git a/downstream/titles/aap-operator-installation/master.adoc b/downstream/titles/aap-operator-installation/master.adoc index 1a6283a893..7e217e0c8d 100644 --- a/downstream/titles/aap-operator-installation/master.adoc +++ b/downstream/titles/aap-operator-installation/master.adoc @@ -9,11 +9,11 @@ include::attributes/attributes.adoc[] // Book Title -= Deploying the Red Hat Ansible Automation Platform operator on OpenShift Container Platform += Installing on OpenShift Container Platform Thank you for your interest in {PlatformName}. {PlatformNameShort} is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments. -This guide helps you to understand the installation, migration and upgrade requirements for deploying the {OperatorPlatform} on {OCPShort}. +This guide helps you to understand the installation, migration and upgrade requirements for deploying the {OperatorPlatformNameShort} on {OCPShort}. include::{Boilerplate}[] @@ -21,7 +21,10 @@ include::platform/assembly-operator-install-planning.adoc[leveloffset=+1] include::platform/assembly-install-aap-operator.adoc[leveloffset=+1] -// Part of the 2.5 release commenting out until live +include::platform/assembly-update-ocp.adoc[leveloffset=+1] + +include::platform/assembly-installing-aap-operator-cli.adoc[leveloffset=+1] + include::platform/assembly-configure-aap-operator.adoc[leveloffset=+1] include::platform/assembly-installing-controller-operator.adoc[leveloffset=+1] @@ -32,16 +35,18 @@ include::platform/assembly-installing-hub-operator.adoc[leveloffset=+1] // include::platform/assembly-installing-controller-operator-local-db.adoc[leveloffset=+1] -include::platform/assembly-installing-aap-operator-cli.adoc[leveloffset=+1] -include::platform/assembly-deploy-eda-controller-on-aap-operator.adoc[leveloffset=+1] +// [gmurray] Commenting out this module as covered in assembly-configure-aap-operator.adoc +// include::platform/assembly-deploy-eda-controller-on-aap-operator.adoc[leveloffset=+1] +include::platform/platform/proc-operator-deploy-redis.adoc[leveloffset=+1] -include::platform/assembly-using-rhsso-operator-with-automation-hub.adoc[leveloffset=+1] include::platform/assembly-aap-migration.adoc[leveloffset=+1] -// [gmurray] Commenting out this module as part of AAP-22627. Upgrade is not supported in the initial 2.5 release. 
-// include::platform/assembly-operator-upgrade.adoc[leveloffset=+1] +include::platform/assembly-operator-upgrade.adoc[leveloffset=+1] include::platform/assembly-operator-add-execution-nodes.adoc[leveloffset=+1] + include::platform/assembly-controller-resource-operator.adoc[leveloffset=+1] + +include::platform/assembly-appendix-operator-crs.adoc[leveloffset=+1] diff --git a/downstream/titles/aap-planning-guide/docinfo.xml b/downstream/titles/aap-planning-guide/docinfo.xml index 71a8b37b09..1d7ad68203 100644 --- a/downstream/titles/aap-planning-guide/docinfo.xml +++ b/downstream/titles/aap-planning-guide/docinfo.xml @@ -1,4 +1,4 @@ -Red Hat Ansible Automation Platform planning guide +Planning your installation Red Hat Ansible Automation Platform 2.5 Plan for installation of Ansible Automation Platform diff --git a/downstream/titles/aap-planning-guide/master.adoc b/downstream/titles/aap-planning-guide/master.adoc index 8a8424685b..609610eca1 100644 --- a/downstream/titles/aap-planning-guide/master.adoc +++ b/downstream/titles/aap-planning-guide/master.adoc @@ -8,7 +8,7 @@ include::attributes/attributes.adoc[] // Book Title -= Red Hat Ansible Automation Platform planning guide += Planning your installation Thank you for your interest in {PlatformName}. {PlatformNameShort} is a commercial offering that helps teams manage complex multitiered deployments by adding control, knowledge, and delegation to Ansible-powered environments. @@ -16,11 +16,15 @@ Use the information in this guide to plan your {PlatformName} installation. include::{Boilerplate}[] include::platform/assembly-planning-installation.adoc[leveloffset=+1] -include::platform/assembly-aap-architecture.adoc[leveloffset=+1] +// emurtough removed to avoid duplication with topologies chapter include::platform/assembly-aap-architecture.adoc[leveloffset=+1] include::platform/assembly-aap-platform-components.adoc[leveloffset=+1] +include::platform/assembly-HA-redis.adoc[leveloffset=+1] +:aap-plan: +include::topologies/assembly-overview-tested-deployment-models.adoc[leveloffset=+1] include::platform/assembly-system-requirements.adoc[leveloffset=+1] +:!aap-plan: include::platform/assembly-network-ports-protocols.adoc[leveloffset=+1] -include::platform/assembly-attaching-subscriptions.adoc[leveloffset=+1] +// emurtough removed subscription info to avoid duplication within Access management and authentication include::platform/assembly-attaching-subscriptions.adoc[leveloffset=+1] include::platform/assembly-choosing-obtaining-installer.adoc[leveloffset=+1] include::platform/assembly-inventory-introduction.adoc[leveloffset=+1] -include::platform/assembly-supported-installation-scenarios.adoc[leveloffset=+1] +// emurtough removed to avoid duplication with topologies docs include::platform/assembly-supported-installation-scenarios.adoc[leveloffset=+1] diff --git a/downstream/titles/aap-planning-guide/topologies b/downstream/titles/aap-planning-guide/topologies new file mode 120000 index 0000000000..760101fd3c --- /dev/null +++ b/downstream/titles/aap-planning-guide/topologies @@ -0,0 +1 @@ +../../assemblies/topologies \ No newline at end of file diff --git a/downstream/titles/aap-plugin-rhdh/aap-common b/downstream/titles/aap-plugin-rhdh-install/aap-common similarity index 100% rename from downstream/titles/aap-plugin-rhdh/aap-common rename to downstream/titles/aap-plugin-rhdh-install/aap-common diff --git a/downstream/titles/aap-plugin-rhdh/attributes b/downstream/titles/aap-plugin-rhdh-install/attributes similarity index 100% rename from 
downstream/titles/aap-plugin-rhdh/attributes rename to downstream/titles/aap-plugin-rhdh-install/attributes diff --git a/downstream/titles/aap-plugin-rhdh/devtools b/downstream/titles/aap-plugin-rhdh-install/devtools similarity index 100% rename from downstream/titles/aap-plugin-rhdh/devtools rename to downstream/titles/aap-plugin-rhdh-install/devtools diff --git a/downstream/titles/aap-plugin-rhdh-install/docinfo.xml b/downstream/titles/aap-plugin-rhdh-install/docinfo.xml new file mode 100644 index 0000000000..8445ef71f4 --- /dev/null +++ b/downstream/titles/aap-plugin-rhdh-install/docinfo.xml @@ -0,0 +1,11 @@ +Installing Ansible plug-ins for Red Hat Developer Hub +Red Hat Ansible Automation Platform +2.5 +Install and configure Ansible plug-ins for Red Hat Developer Hub + + This guide describes how to install and configure Ansible plug-ins for Red Hat Developer Hub so that users can learn about Ansible, explore curated collections, and develop automation projects. + + + Red Hat Customer Content Services + + diff --git a/downstream/titles/aap-plugin-rhdh/images b/downstream/titles/aap-plugin-rhdh-install/images similarity index 100% rename from downstream/titles/aap-plugin-rhdh/images rename to downstream/titles/aap-plugin-rhdh-install/images diff --git a/downstream/titles/aap-plugin-rhdh-install/master.adoc b/downstream/titles/aap-plugin-rhdh-install/master.adoc new file mode 100644 index 0000000000..be0a3224d0 --- /dev/null +++ b/downstream/titles/aap-plugin-rhdh-install/master.adoc @@ -0,0 +1,45 @@ +:imagesdir: images +:numbered: +:toclevels: 4 +:experimental: +:context: aap-plugin-rhdh-installing + +include::attributes/attributes.adoc[] + +// Book Title += Installing Ansible plug-ins for Red Hat Developer Hub + +Thank you for your interest in {PlatformName}. {PlatformNameShort} is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments. + +This guide describes how to install {AAPRHDH}. +This document has been updated to include information for the latest release of {PlatformNameShort}. + +include::{Boilerplate}[] + +// [IMPORTANT] +// ==== +// {AAPRHDH} is a Technology Preview feature only. 
+// include::snippets/technology-preview.adoc[] +// ==== + +include::devtools/assembly-rhdh-intro.adoc[leveloffset=+1] + + +// Installation +include::devtools/assembly-rhdh-install-ocp-helm.adoc[leveloffset=+1] +include::devtools/assembly-rhdh-install-ocp-operator.adoc[leveloffset=+1] +// +// Subscription warnings +include::devtools/assembly-rhdh-subscription-warnings.adoc[leveloffset=+1] +// +// Upgrade +include::devtools/assembly-rhdh-upgrade-ocp-helm.adoc[leveloffset=+1] +include::devtools/assembly-rhdh-upgrade-ocp-operator.adoc[leveloffset=+1] +// +// Uninstall +include::devtools/assembly-rhdh-uninstall-ocp-helm.adoc[leveloffset=+1] +include::devtools/assembly-rhdh-uninstall-ocp-operator.adoc[leveloffset=+1] +// +// Telemetry +include::devtools/assembly-rhdh-telemetry-capturing.adoc[leveloffset=+1] + diff --git a/downstream/titles/aap-plugin-rhdh-install/snippets b/downstream/titles/aap-plugin-rhdh-install/snippets new file mode 120000 index 0000000000..7bf6da9a51 --- /dev/null +++ b/downstream/titles/aap-plugin-rhdh-install/snippets @@ -0,0 +1 @@ +../../snippets \ No newline at end of file diff --git a/downstream/titles/aap-plugin-rhdh-using/aap-common b/downstream/titles/aap-plugin-rhdh-using/aap-common new file mode 120000 index 0000000000..472eeb4dac --- /dev/null +++ b/downstream/titles/aap-plugin-rhdh-using/aap-common @@ -0,0 +1 @@ +../../aap-common \ No newline at end of file diff --git a/downstream/titles/aap-plugin-rhdh-using/attributes b/downstream/titles/aap-plugin-rhdh-using/attributes new file mode 120000 index 0000000000..a5caaa73a5 --- /dev/null +++ b/downstream/titles/aap-plugin-rhdh-using/attributes @@ -0,0 +1 @@ +../../attributes \ No newline at end of file diff --git a/downstream/titles/aap-plugin-rhdh-using/devtools b/downstream/titles/aap-plugin-rhdh-using/devtools new file mode 120000 index 0000000000..dc79f7e1fa --- /dev/null +++ b/downstream/titles/aap-plugin-rhdh-using/devtools @@ -0,0 +1 @@ +../../assemblies/devtools \ No newline at end of file diff --git a/downstream/titles/aap-plugin-rhdh-using/docinfo.xml b/downstream/titles/aap-plugin-rhdh-using/docinfo.xml new file mode 100644 index 0000000000..f2bf42993d --- /dev/null +++ b/downstream/titles/aap-plugin-rhdh-using/docinfo.xml @@ -0,0 +1,11 @@ +Using Ansible plug-ins for Red Hat Developer Hub +Red Hat Ansible Automation Platform +2.5 +Use Ansible plug-ins for Red Hat Developer Hub + + This guide describes how to use Ansible plug-ins for Red Hat Developer Hub to learn about Ansible, explore curated collections, and create playbook projects. 
+ + + Red Hat Customer Content Services + + diff --git a/downstream/titles/aap-plugin-rhdh-using/images b/downstream/titles/aap-plugin-rhdh-using/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/downstream/titles/aap-plugin-rhdh-using/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/downstream/titles/aap-plugin-rhdh/master.adoc b/downstream/titles/aap-plugin-rhdh-using/master.adoc similarity index 54% rename from downstream/titles/aap-plugin-rhdh/master.adoc rename to downstream/titles/aap-plugin-rhdh-using/master.adoc index 6e4a7cc2c6..8ab81c6f8b 100644 --- a/downstream/titles/aap-plugin-rhdh/master.adoc +++ b/downstream/titles/aap-plugin-rhdh-using/master.adoc @@ -2,23 +2,26 @@ :numbered: :toclevels: 4 :experimental: -:context: aap-plugin-rhdh +:context: aap-plugin-rhdh-using include::attributes/attributes.adoc[] // Book Title -= Ansible plug-ins for Red Hat Developer Hub += Using Ansible plug-ins for Red Hat Developer Hub Thank you for your interest in {PlatformName}. {PlatformNameShort} is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments. -This guide describes how to install and use {AAPRHDH}. +This guide describes how to use {AAPRHDH}. This document has been updated to include information for the latest release of {PlatformNameShort}. include::{Boilerplate}[] -include::devtools/assembly-rhdh-intro.adoc[leveloffset=+1] -include::devtools/assembly-rhdh-planning.adoc[leveloffset=+1] -include::devtools/assembly-rhdh-install.adoc[leveloffset=+1] -include::devtools/assembly-rhdh-upgrading-uninstalling.adoc[leveloffset=+1] -include::devtools/assembly-rhdh-configure.adoc[leveloffset=+1] +// [IMPORTANT] +// ==== +// {AAPRHDH} is a Technology Preview feature only. 
+// include::snippets/technology-preview.adoc[] +// ==== + include::devtools/assembly-rhdh-using.adoc[leveloffset=+1] +include::devtools/assembly-rhdh-feedback.adoc[leveloffset=+1] +include::devtools/assembly-rhdh-example.adoc[leveloffset=+1] diff --git a/downstream/titles/aap-plugin-rhdh-using/snippets b/downstream/titles/aap-plugin-rhdh-using/snippets new file mode 120000 index 0000000000..7bf6da9a51 --- /dev/null +++ b/downstream/titles/aap-plugin-rhdh-using/snippets @@ -0,0 +1 @@ +../../snippets \ No newline at end of file diff --git a/downstream/titles/analytics/aap-common b/downstream/titles/analytics/aap-common new file mode 120000 index 0000000000..472eeb4dac --- /dev/null +++ b/downstream/titles/analytics/aap-common @@ -0,0 +1 @@ +../../aap-common \ No newline at end of file diff --git a/downstream/titles/analytics/analytics b/downstream/titles/analytics/analytics new file mode 120000 index 0000000000..20840e99de --- /dev/null +++ b/downstream/titles/analytics/analytics @@ -0,0 +1 @@ +../../assemblies/analytics \ No newline at end of file diff --git a/downstream/titles/analytics/attributes b/downstream/titles/analytics/attributes new file mode 120000 index 0000000000..a5caaa73a5 --- /dev/null +++ b/downstream/titles/analytics/attributes @@ -0,0 +1 @@ +../../attributes \ No newline at end of file diff --git a/downstream/titles/analytics/automation-savings-planner/aap-common b/downstream/titles/analytics/automation-savings-planner/aap-common deleted file mode 120000 index ab3cbbd419..0000000000 --- a/downstream/titles/analytics/automation-savings-planner/aap-common +++ /dev/null @@ -1 +0,0 @@ -../../../aap-common/ \ No newline at end of file diff --git a/downstream/titles/analytics/automation-savings-planner/analytics b/downstream/titles/analytics/automation-savings-planner/analytics deleted file mode 120000 index 150b501734..0000000000 --- a/downstream/titles/analytics/automation-savings-planner/analytics +++ /dev/null @@ -1 +0,0 @@ -../../../assemblies/analytics \ No newline at end of file diff --git a/downstream/titles/analytics/automation-savings/aap-common b/downstream/titles/analytics/automation-savings/aap-common deleted file mode 120000 index ab3cbbd419..0000000000 --- a/downstream/titles/analytics/automation-savings/aap-common +++ /dev/null @@ -1 +0,0 @@ -../../../aap-common/ \ No newline at end of file diff --git a/downstream/titles/analytics/automation-savings/analytics b/downstream/titles/analytics/automation-savings/analytics deleted file mode 120000 index 4d9cc94a9d..0000000000 --- a/downstream/titles/analytics/automation-savings/analytics +++ /dev/null @@ -1 +0,0 @@ -../../../assemblies/analytics/ \ No newline at end of file diff --git a/downstream/titles/analytics/automation-savings/attributes b/downstream/titles/analytics/automation-savings/attributes deleted file mode 120000 index 8615cf3107..0000000000 --- a/downstream/titles/analytics/automation-savings/attributes +++ /dev/null @@ -1 +0,0 @@ -../../../attributes/ \ No newline at end of file diff --git a/downstream/titles/analytics/docinfo.xml b/downstream/titles/analytics/docinfo.xml new file mode 100644 index 0000000000..a15b8b438d --- /dev/null +++ b/downstream/titles/analytics/docinfo.xml @@ -0,0 +1,11 @@ +Using automation analytics +Red Hat Ansible Automation Platform +2.5 +Evaluate the cost savings associated with automated processes + +This guide shows how to use the features of automation analytics to evaluate how automation is deployed across your environments and the savings associated with it. 
+ + + Red Hat Customer Content Services + + diff --git a/downstream/titles/analytics/images b/downstream/titles/analytics/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/downstream/titles/analytics/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/downstream/titles/analytics/job-explorer/.gitkeep b/downstream/titles/analytics/job-explorer/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/downstream/titles/analytics/job-explorer/aap-common b/downstream/titles/analytics/job-explorer/aap-common deleted file mode 120000 index ab3cbbd419..0000000000 --- a/downstream/titles/analytics/job-explorer/aap-common +++ /dev/null @@ -1 +0,0 @@ -../../../aap-common/ \ No newline at end of file diff --git a/downstream/titles/analytics/job-explorer/analytics b/downstream/titles/analytics/job-explorer/analytics deleted file mode 120000 index 4d9cc94a9d..0000000000 --- a/downstream/titles/analytics/job-explorer/analytics +++ /dev/null @@ -1 +0,0 @@ -../../../assemblies/analytics/ \ No newline at end of file diff --git a/downstream/titles/analytics/job-explorer/attributes b/downstream/titles/analytics/job-explorer/attributes deleted file mode 120000 index 8615cf3107..0000000000 --- a/downstream/titles/analytics/job-explorer/attributes +++ /dev/null @@ -1 +0,0 @@ -../../../attributes/ \ No newline at end of file diff --git a/downstream/titles/analytics/master.adoc b/downstream/titles/analytics/master.adoc new file mode 100644 index 0000000000..494b759dc5 --- /dev/null +++ b/downstream/titles/analytics/master.adoc @@ -0,0 +1,22 @@ +:imagesdir: images +:numbered: +:toclevels: 1 + +:experimental: + +include::attributes/attributes.adoc[] + + +// Book Title += Using automation analytics + +This guide shows how to use the features of automation analytics to evaluate how automation is deployed across your environments and the savings associated with it. 
+ +// Downstream content only +include::{Boilerplate}[] + +// Contents +include::analytics/assembly-evaluating-automation-return.adoc[leveloffset=+1] +include::analytics/assembly-automation-savings-planner.adoc[leveloffset=+1] +include::analytics/assembly-insights-reports.adoc[leveloffset=+1] +include::analytics/assembly-using-job-explorer.adoc[leveloffset=+1] diff --git a/downstream/titles/analytics/reports/aap-common b/downstream/titles/analytics/reports/aap-common deleted file mode 120000 index ab3cbbd419..0000000000 --- a/downstream/titles/analytics/reports/aap-common +++ /dev/null @@ -1 +0,0 @@ -../../../aap-common/ \ No newline at end of file diff --git a/downstream/titles/analytics/reports/analytics b/downstream/titles/analytics/reports/analytics deleted file mode 120000 index 150b501734..0000000000 --- a/downstream/titles/analytics/reports/analytics +++ /dev/null @@ -1 +0,0 @@ -../../../assemblies/analytics \ No newline at end of file diff --git a/downstream/titles/analytics/reports/attributes b/downstream/titles/analytics/reports/attributes deleted file mode 120000 index 0d100da61c..0000000000 --- a/downstream/titles/analytics/reports/attributes +++ /dev/null @@ -1 +0,0 @@ -../../../attributes \ No newline at end of file diff --git a/downstream/titles/analytics/snippets b/downstream/titles/analytics/snippets new file mode 120000 index 0000000000..7bf6da9a51 --- /dev/null +++ b/downstream/titles/analytics/snippets @@ -0,0 +1 @@ +../../snippets \ No newline at end of file diff --git a/downstream/titles/automation-mesh/docinfo.xml b/downstream/titles/automation-mesh/docinfo.xml index 9aedd47fd6..21d82d8834 100644 --- a/downstream/titles/automation-mesh/docinfo.xml +++ b/downstream/titles/automation-mesh/docinfo.xml @@ -1,4 +1,4 @@ -Red Hat Ansible Automation Platform automation mesh guide for VM-based installations +Automation mesh for VM environments Red Hat Ansible Automation Platform 2.5 Automate at scale in a cloud-native way diff --git a/downstream/titles/automation-mesh/master.adoc b/downstream/titles/automation-mesh/master.adoc index 81af89fcd7..b037bc6122 100644 --- a/downstream/titles/automation-mesh/master.adoc +++ b/downstream/titles/automation-mesh/master.adoc @@ -9,7 +9,7 @@ include::attributes/attributes.adoc[] // Book Title -= Red Hat Ansible Automation Platform automation mesh guide for VM-based installations += Automation mesh for VM environments Thank you for your interest in {PlatformName}. {PlatformNameShort} is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments. diff --git a/downstream/titles/builder/docinfo.xml b/downstream/titles/builder/docinfo.xml index c31a36f9fb..55235fd4a1 100644 --- a/downstream/titles/builder/docinfo.xml +++ b/downstream/titles/builder/docinfo.xml @@ -1,9 +1,10 @@ -Creating and consuming execution environments +Creating and using execution environments Red Hat Ansible Automation Platform 2.5 - Create and use execution environments with Ansible Builder + Create and use execution environment containers This guide shows how to create consistent and reproducible automation execution environments for your Red Hat Ansible Automation Platform. +This document includes content from the upstream docs.ansible.com documentation, which is covered by the Apache 2.0 license. 
Red Hat Customer Content Services diff --git a/downstream/titles/builder/master.adoc b/downstream/titles/builder/master.adoc index 7a526eadc5..a09a016612 100644 --- a/downstream/titles/builder/master.adoc +++ b/downstream/titles/builder/master.adoc @@ -1,15 +1,15 @@ :imagesdir: images :numbered: :toclevels: 1 - +:context: builder :experimental: include::attributes/attributes.adoc[] // Book Title -= Creating and consuming execution environments += Creating and using execution environments -Use {Builder} to create consistent and reproducible {ExecEnvName} for your {PlatformName} needs. +Use {ExecEnvshort} builder to create consistent and reproducible containers for your {PlatformName} needs. include::{Boilerplate}[] @@ -21,6 +21,7 @@ include::builder/assembly-publishing-exec-env.adoc[leveloffset=+1] include::hub/assembly-populate-container-registry.adoc[leveloffset=+1] include::hub/assembly-setup-container-repository.adoc[leveloffset=+1] include::hub/assembly-pull-image.adoc[leveloffset=+1] - +include::builder/assembly-open-source-license.adoc[leveloffset=+1] [appendix] include::builder/builder/con-ee-precedence.adoc[leveloffset=+1] + \ No newline at end of file diff --git a/downstream/titles/builder/platform b/downstream/titles/builder/platform new file mode 120000 index 0000000000..06b49528ee --- /dev/null +++ b/downstream/titles/builder/platform @@ -0,0 +1 @@ +../../assemblies/platform \ No newline at end of file diff --git a/downstream/titles/central-auth/docinfo.xml b/downstream/titles/central-auth/docinfo.xml index 63edd7f066..8a66af0c12 100644 --- a/downstream/titles/central-auth/docinfo.xml +++ b/downstream/titles/central-auth/docinfo.xml @@ -1,10 +1,10 @@ -Installing and configuring central authentication for the Ansible Automation Platform +Access management and authentication Red Hat Ansible Automation Platform 2.5 -Enable central authentication functions for your Ansible Automation Platform +Configure role-based access control, authenticators, and authenticator maps in Ansible Automation Platform -This guide provides platform administrators with the information and procedures required to enable and configure central authentication on Ansible Automation Platform. +This guide provides requirements, options, and recommendations for controlling access to Red Hat Ansible Automation Platform resources. Red Hat Customer Content Services diff --git a/downstream/titles/central-auth/images b/downstream/titles/central-auth/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/downstream/titles/central-auth/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/downstream/titles/central-auth/master.adoc b/downstream/titles/central-auth/master.adoc index 07ac6a359c..e96826cbb1 100644 --- a/downstream/titles/central-auth/master.adoc +++ b/downstream/titles/central-auth/master.adoc @@ -7,15 +7,19 @@ include::attributes/attributes.adoc[] // Book Title -= Installing and configuring central authentication for the Ansible Automation Platform - -{AAPCentralAuth} is a third-party identity provider (idP) solution, allowing for a simplified single sign-on solution that can be used across the {PlatformNameShort}. Platform administrators can utilize {CentralAuth} to test connectivity and authentication, as well as onboard new users and manage user permissions by configuring and assigning them to groups. Along with OpenID Connect-based and LDAP support, {CentralAuth} also provides a supported REST API which can be used to bootstrap customer usage.
+= Access management and authentication include::{Boilerplate}[] -include::central-auth/assembly-central-auth-hub.adoc[leveloffset=+1] -include::central-auth/assembly-central-auth-add-user-storage.adoc[leveloffset=+1] -include::central-auth/assembly-assign-hub-admin-permissions.adoc[leveloffset=+1] -include::central-auth/assembly-central-auth-identity-broker.adoc[leveloffset=+1] -include::central-auth/assembly-central-auth-group-perms.adoc[leveloffset=+1] -include::central-auth/assembly-configuring-central-auth-generic-oidc-settings.adoc[leveloffset=+1] +include::platform/platform/con-gw-overview-access-auth.adoc[leveloffset=+1] + +include::platform/assembly-gateway-licensing.adoc[leveloffset=+1] + +include::platform/assembly-gw-configure-authentication.adoc[leveloffset=+1] +include::platform/assembly-gw-config-authentication-type.adoc[leveloffset=+2] +include::platform/assembly-gw-mapping.adoc[leveloffset=+2] +include::platform/assembly-gw-managing-authentication.adoc[leveloffset=+2] +include::platform/assembly-gw-token-based-authentication.adoc[leveloffset=+1] +include::platform/assembly-gw-managing-access.adoc[leveloffset=+1] +include::platform/assembly-gw-roles.adoc[leveloffset=+1] +include::platform/assembly-gw-settings.adoc[leveloffset=+1] diff --git a/downstream/titles/central-auth/platform b/downstream/titles/central-auth/platform new file mode 120000 index 0000000000..06b49528ee --- /dev/null +++ b/downstream/titles/central-auth/platform @@ -0,0 +1 @@ +../../assemblies/platform \ No newline at end of file diff --git a/downstream/titles/central-auth/snippets b/downstream/titles/central-auth/snippets new file mode 120000 index 0000000000..7bf6da9a51 --- /dev/null +++ b/downstream/titles/central-auth/snippets @@ -0,0 +1 @@ +../../snippets \ No newline at end of file diff --git a/downstream/titles/controller/controller-admin-guide/docinfo.xml b/downstream/titles/controller/controller-admin-guide/docinfo.xml index a22b7974ff..5cc1bb1f9f 100644 --- a/downstream/titles/controller/controller-admin-guide/docinfo.xml +++ b/downstream/titles/controller/controller-admin-guide/docinfo.xml @@ -1,7 +1,7 @@ -Automation controller administration guide +Configuring automation execution Red Hat Ansible Automation Platform 2.5 -Administrator Guide for Automation Controller +Learn how to manage, monitor, and use automation controller Learn how to manage automation controller through custom scripts, management jobs, and more. diff --git a/downstream/titles/controller/controller-admin-guide/master.adoc b/downstream/titles/controller/controller-admin-guide/master.adoc index 0e285bfe8f..acd202f15a 100644 --- a/downstream/titles/controller/controller-admin-guide/master.adoc +++ b/downstream/titles/controller/controller-admin-guide/master.adoc @@ -9,35 +9,40 @@ include::attributes/attributes.adoc[] // Book Title -= Automation controller administration guide += Configuring automation execution -The {ControllerName} Administration Guide describes the administration of {ControllerName} through custom scripts, management jobs, and more. -Written for DevOps engineers and administrators, the {ControllerName} Administration Guide assumes a basic understanding of the systems requiring management with {ControllerName}s easy-to-use graphical interface. +This guide describes the administration of {ControllerName} through custom scripts, management jobs, and more. 
+Written for DevOps engineers and administrators, the Configuring automation execution guide assumes a basic understanding of the systems requiring management with {ControllerName}'s easy-to-use graphical interface. include::{Boilerplate}[] //include::platform/assembly-controller-licensing.adoc[leveloffset=+1] include::platform/assembly-ag-controller-start-stop-controller.adoc[leveloffset=+1] +//Uses Settings menu. Which may be separate documentation. +include::platform/assembly-ag-controller-config.adoc[leveloffset=+1] +include::platform/assembly-controller-improving-performance.adoc[leveloffset=+1] +include::platform/assembly-controller-management-jobs.adoc[leveloffset=+1] //Deprecated //include::platform/assembly-custom-inventory-scripts.adoc[leveloffset=+1] include::platform/assembly-inventory-file-importing.adoc[leveloffset=+1] //include::platform/assembly-multi-credential-assignment.adoc[leveloffset=+1] -include::platform/assembly-controller-management-jobs.adoc[leveloffset=+1] include::platform/assembly-ag-controller-clustering.adoc[leveloffset=+1] -include::platform/assembly-ag-instance-and-container-groups.adoc[leveloffset=+1] -include::platform/assembly-controller-instances.adoc[leveloffset=+1] -include::platform/assembly-controller-topology-viewer.adoc[leveloffset=+1] +//Removed to user Guide +//include::platform/assembly-controller-instances.adoc[leveloffset=+1] +//include::platform/assembly-ag-instance-and-container-groups.adoc[leveloffset=+1] +//Removed to User Guide +//include::platform/assembly-controller-topology-viewer.adoc[leveloffset=+1] include::platform/assembly-controller-log-files.adoc[leveloffset=+1] //Lizzi's work: Logging removed at 2.5-next include::platform/assembly-controller-logging-aggregation.adoc[leveloffset=+1] include::platform/assembly-controller-metrics.adoc[leveloffset=+1] -include::platform/assembly-controller-improving-performance.adoc[leveloffset=+1] +include::platform/assembly-metrics-utility.adoc[leveloffset=+1] +include::platform/assembly-controller-secret-management.adoc[leveloffset=+1] include::platform/assembly-ag-controller-secret-handling.adoc[leveloffset=+1] include::platform/assembly-ag-controller-security-best-practices.adoc[leveloffset=+1] include::platform/assembly-controller-awx-manage-utility.adoc[leveloffset=+1] -//Uses Settings menu. Which may be separate documentation. -include::platform/assembly-ag-controller-config.adoc[leveloffset=+1] -include::platform/assembly-controller-isolation-function-variables.adoc[leveloffset=+1] +//Duplicate of content in security and now moved to jobs +//include::platform/assembly-controller-isolation-function-variables.adoc[leveloffset=+1] //Donna's work //include::platform/assembly-controller-token-based-authentication.adoc[leveloffset=+1] //include::platform/assembly-controller-set-up-social-authentication.adoc[leveloffset=+1] @@ -48,8 +53,6 @@ include::platform/assembly-controller-isolation-function-variables.adoc[leveloff //include::platform/assembly-ag-controller-session-limits.adoc[leveloffset=+1] include::platform/assembly-ag-controller-backup-and-restore.adoc[leveloffset=+1] //section 28 is a replica of section 18.4.2, so removing it -//Uses Settings menu. Which may be separate documentation. -//Usability analytics is no longer supported.
-//include::platform/assembly-ag-controller-usability-analytics.adoc[leveloffset=+1] +include::platform/assembly-ag-controller-usability-analytics.adoc[leveloffset=+1] include::platform/assembly-ag-controller-troubleshooting.adoc[leveloffset=+1] include::platform/assembly-ag-controller-tips-and-tricks.adoc[leveloffset=+1] diff --git a/downstream/titles/controller/controller-api-overview/docinfo.xml b/downstream/titles/controller/controller-api-overview/docinfo.xml index a866e685ea..7047335f62 100644 --- a/downstream/titles/controller/controller-api-overview/docinfo.xml +++ b/downstream/titles/controller/controller-api-overview/docinfo.xml @@ -1,4 +1,4 @@ -Automation controller API overview +Automation execution API overview Red Hat Ansible Automation Platform 2.5 Developer overview for the {ControllerName} API diff --git a/downstream/titles/controller/controller-api-overview/master.adoc b/downstream/titles/controller/controller-api-overview/master.adoc index cb57796ed5..d7c16b455d 100644 --- a/downstream/titles/controller/controller-api-overview/master.adoc +++ b/downstream/titles/controller/controller-api-overview/master.adoc @@ -8,7 +8,7 @@ include::attributes/attributes.adoc[] // Book Title -= Automation controller API overview += Automation execution API overview Thank you for your interest in {PlatformName}. {PlatformNameShort} helps teams manage complex multitiered deployments by adding control, knowledge, and delegation to Ansible-powered environments. diff --git a/downstream/titles/controller/controller-getting-started/attributes b/downstream/titles/controller/controller-getting-started/attributes deleted file mode 120000 index 0d100da61c..0000000000 --- a/downstream/titles/controller/controller-getting-started/attributes +++ /dev/null @@ -1 +0,0 @@ -../../../attributes \ No newline at end of file diff --git a/downstream/titles/controller/controller-getting-started/images b/downstream/titles/controller/controller-getting-started/images deleted file mode 120000 index 4dd3347de1..0000000000 --- a/downstream/titles/controller/controller-getting-started/images +++ /dev/null @@ -1 +0,0 @@ -../../../images \ No newline at end of file diff --git a/downstream/titles/controller/controller-user-guide/docinfo.xml b/downstream/titles/controller/controller-user-guide/docinfo.xml index ab2fc6ca91..03ebe0e483 100644 --- a/downstream/titles/controller/controller-user-guide/docinfo.xml +++ b/downstream/titles/controller/controller-user-guide/docinfo.xml @@ -1,9 +1,9 @@ -Automation controller user guide +Using automation execution Red Hat Ansible Automation Platform 2.5 -User Guide for Automation Controller +Use automation execution to deploy, define, operate, scale, and delegate automation - This guide describes the use of the Red Hat Ansible Automation Platform Controller (automation controller). + This guide shows you how to use automation controller to define, operate, scale, and delegate automation across your enterprise. Red Hat Customer Content Services diff --git a/downstream/titles/controller/controller-user-guide/master.adoc b/downstream/titles/controller/controller-user-guide/master.adoc index e530de6e13..4fe0843e49 100644 --- a/downstream/titles/controller/controller-user-guide/master.adoc +++ b/downstream/titles/controller/controller-user-guide/master.adoc @@ -9,48 +9,71 @@ include::attributes/attributes.adoc[] // Book Title -= Automation controller user guide += Using automation execution Thank you for your interest in {PlatformName} {ControllerName}.
{ControllerNameStart} helps teams manage complex multitiered deployments by adding control, knowledge, and delegation to Ansible-powered environments. -The {ControllerNameStart} User Guide describes all of the functionality available in {ControllerName}. +Using {ControllerName} describes all of the functionality available in {ControllerName}. It assumes moderate familiarity with Ansible, including concepts such as playbooks, variables, and tags. For more information about these and other Ansible concepts, see the link:https://docs.ansible.com/[Ansible documentation]. include::{Boilerplate}[] include::platform/assembly-UG-overview.adoc[leveloffset=+1] -include::platform/assembly-controller-licensing.adoc[leveloffset=+1] +//Moved to Access management doc +//include::platform/assembly-controller-licensing.adoc[leveloffset=+1] include::platform/assembly-controller-login.adoc[leveloffset=+1] -include::platform/assembly-controller-managing-subscriptions.adoc[leveloffset=+1] +//Moved to Access management doc +//include::platform/assembly-controller-managing-subscriptions.adoc[leveloffset=+1] +//Rewritten for 2.5 include::platform/assembly-controller-user-interface.adoc[leveloffset=+1] include::platform/assembly-controller-search.adoc[leveloffset=+1] -include::platform/assembly-controller-organizations.adoc[leveloffset=+1] -include::platform/assembly-controller-users.adoc[leveloffset=+1] -include::platform/assembly-controller-teams.adoc[leveloffset=+1] -include::platform/assembly-controller-credentials.adoc[leveloffset=+1] -//In the new UI, Credential types is part of Credentials. -include::platform/assembly-controller-custom-credentials.adoc[leveloffset=+1] -include::platform/assembly-controller-secret-management.adoc[leveloffset=+1] -include::platform/assembly-controller-applications.adoc[leveloffset=+1] -include::platform/assembly-controller-execution-environments.adoc[leveloffset=+1] -include::platform/assembly-controller-ee-setup-reference.adoc[leveloffset=+1] +//Jobs +include::platform/assembly-ug-controller-jobs.adoc[leveloffset=+1] +//Templates +include::platform/assembly-ug-controller-job-templates.adoc[leveloffset=+1] +include::platform/assembly-ug-controller-job-slicing.adoc[leveloffset=+1] +//This includes workflow approvals. 
+include::platform/assembly-ug-controller-workflow-job-templates.adoc[leveloffset=+1] +include::platform/assembly-ug-controller-workflows.adoc[leveloffset=+1] +//Schedules +include::platform/assembly-ug-controller-schedules.adoc[leveloffset=+1] +//Projects include::platform/assembly-controller-projects.adoc[leveloffset=+1] include::platform/assembly-controller-project-signing.adoc[leveloffset=+1] +//Infrastructure-Topology View +include::platform/assembly-controller-topology-viewer.adoc[leveloffset=+1] +//Infrastructure-Inventories include::platform/assembly-controller-inventories.adoc[leveloffset=+1] include::platform/assembly-controller-inventory-templates.adoc[leveloffset=+1] -include::platform/assembly-ug-controller-job-templates.adoc[leveloffset=+1] -include::platform/assembly-ug-controller-job-slicing.adoc[leveloffset=+1] -include::platform/assembly-ug-controller-workflows.adoc[leveloffset=+1] -include::platform/assembly-ug-controller-workflow-job-templates.adoc[leveloffset=+1] +//Adding short Hosts assembly +include::platform/assembly-controller-hosts.adoc[leveloffset=+1] +//Infrastructure-Instance Groups include::platform/assembly-ug-controller-instance-groups.adoc[leveloffset=+1] -include::platform/assembly-ug-controller-jobs.adoc[leveloffset=+1] -include::platform/assembly-ug-controller-work-with-webhooks.adoc[leveloffset=+1] +include::platform/assembly-ag-instance-and-container-groups.adoc[leveloffset=+1] +//Infrastructure-Instances +include::platform/assembly-controller-instances.adoc[leveloffset=+1] +//Infrastructure-Execution environments +include::platform/assembly-controller-execution-environments.adoc[leveloffset=+1] +include::platform/assembly-controller-ee-setup-reference.adoc[leveloffset=+1] +//Moved to Donna's Access management document +//include::platform/assembly-controller-organizations.adoc[leveloffset=+1] +//include::platform/assembly-controller-users.adoc[leveloffset=+1] +//include::platform/assembly-controller-teams.adoc[leveloffset=+1] +//Possibly in Donna's credentials document +include::platform/assembly-controller-credentials.adoc[leveloffset=+1] +include::platform/assembly-controller-custom-credentials.adoc[leveloffset=+1] +include::platform/assembly-controller-activity-stream.adoc[leveloffset=+1] +//Moved to admin guide +//include::platform/assembly-controller-secret-management.adoc[leveloffset=+1] +//include::platform/assembly-controller-applications.adoc[leveloffset=+1] include::platform/assembly-ug-controller-notifications.adoc[leveloffset=+1] include::platform/assembly-ug-controller-attributes-custom-notifications.adoc[leveloffset=+1] -include::platform/assembly-ug-controller-schedules.adoc[leveloffset=+1] +include::platform/assembly-ug-controller-work-with-webhooks.adoc[leveloffset=+1] include::platform/assembly-ug-controller-setting-up-insights.adoc[leveloffset=+1] include::platform/assembly-controller-best-practices.adoc[leveloffset=+1] -include::platform/assembly-controller-security.adoc[leveloffset=+1] +//RBAC contents to Donna's document, Jobs info to Jobs. +//Moved to admin guide. 
+//include::platform/assembly-controller-security.adoc[leveloffset=+1] include::platform/assembly-controller-glossary.adoc[leveloffset=+1] diff --git a/downstream/titles/develop-automation-content/docinfo.xml b/downstream/titles/develop-automation-content/docinfo.xml index 5c04d7e7de..1473821ead 100644 --- a/downstream/titles/develop-automation-content/docinfo.xml +++ b/downstream/titles/develop-automation-content/docinfo.xml @@ -1,7 +1,7 @@ -Developing Ansible automation content +Developing automation content Red Hat Ansible Automation Platform 2.5 -Install Ansible Automation Platform +Develop Ansible automation content to run automation jobs This guide describes how to develop Ansible automation content and how to use it to run automation jobs from Red Hat Ansible Automation Platforms. diff --git a/downstream/titles/develop-automation-content/master.adoc b/downstream/titles/develop-automation-content/master.adoc index 23b3d8ab91..1038dddf7c 100644 --- a/downstream/titles/develop-automation-content/master.adoc +++ b/downstream/titles/develop-automation-content/master.adoc @@ -7,7 +7,7 @@ include::attributes/attributes.adoc[] // Book Title -= Developing Ansible automation content += Developing automation content Thank you for your interest in {PlatformName}. {PlatformNameShort} is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments. @@ -19,8 +19,14 @@ include::{Boilerplate}[] include::devtools/assembly-devtools-intro.adoc[leveloffset=+1] include::devtools/assembly-developer-workflow.adoc[leveloffset=+1] include::devtools/assembly-devtools-install.adoc[leveloffset=+1] -include::devtools/assembly-devtools-setup.adoc[leveloffset=+1] +// ----- include::devtools/assembly-devtools-setup.adoc[leveloffset=+1] include::devtools/assembly-creating-playbook-project.adoc[leveloffset=+1] include::devtools/assembly-writing-running-playbook.adoc[leveloffset=+1] -// include::devtools/assembly-testing-playbooks.adoc[leveloffset=+1] +// ----- include::devtools/assembly-testing-playbooks.adoc[leveloffset=+1] +include::devtools/assembly-publishing-playbook-collection-aap.adoc[leveloffset=+1] + +// Roles collections + +include::devtools/assembly-devtools-develop-collections.adoc[leveloffset=+1] +include::devtools/assembly-devtools-create-roles-collection.adoc[leveloffset=+1] diff --git a/downstream/titles/develop-automation-content/snippets b/downstream/titles/develop-automation-content/snippets new file mode 120000 index 0000000000..7bf6da9a51 --- /dev/null +++ b/downstream/titles/develop-automation-content/snippets @@ -0,0 +1 @@ +../../snippets \ No newline at end of file diff --git a/downstream/titles/eda/eda-getting-started-guide/aap-common b/downstream/titles/eda/eda-getting-started-guide/aap-common deleted file mode 120000 index fa02a580b8..0000000000 --- a/downstream/titles/eda/eda-getting-started-guide/aap-common +++ /dev/null @@ -1 +0,0 @@ -../../../aap-common \ No newline at end of file diff --git a/downstream/titles/eda/eda-getting-started-guide/attributes b/downstream/titles/eda/eda-getting-started-guide/attributes deleted file mode 120000 index 0d100da61c..0000000000 --- a/downstream/titles/eda/eda-getting-started-guide/attributes +++ /dev/null @@ -1 +0,0 @@ -../../../attributes \ No newline at end of file diff --git a/downstream/titles/eda/eda-getting-started-guide/eda b/downstream/titles/eda/eda-getting-started-guide/eda deleted file mode 120000 index 2a8c0ea9aa..0000000000 --- 
a/downstream/titles/eda/eda-getting-started-guide/eda +++ /dev/null @@ -1 +0,0 @@ -../../../assemblies/eda \ No newline at end of file diff --git a/downstream/titles/eda/eda-getting-started-guide/images b/downstream/titles/eda/eda-getting-started-guide/images deleted file mode 120000 index 4dd3347de1..0000000000 --- a/downstream/titles/eda/eda-getting-started-guide/images +++ /dev/null @@ -1 +0,0 @@ -../../../images \ No newline at end of file diff --git a/downstream/titles/eda/eda-user-guide/docinfo.xml b/downstream/titles/eda/eda-user-guide/docinfo.xml index 4d49b33a80..4a544938ec 100644 --- a/downstream/titles/eda/eda-user-guide/docinfo.xml +++ b/downstream/titles/eda/eda-user-guide/docinfo.xml @@ -1,7 +1,7 @@ -Event-Driven Ansible controller user guide +Using automation decisions Red Hat Ansible Automation Platform 2.5 -Learn to configure and use {EDAcontroller} to enhance and expand automation +Configure and use {EDAcontroller} to enhance and expand automation Learn how to configure your {EDAcontroller} to set up credentials, new projects, decision environments, tokens to authenticate to Ansible Automation Platform Controller, and rulebook activation. diff --git a/downstream/titles/eda/eda-user-guide/master.adoc b/downstream/titles/eda/eda-user-guide/master.adoc index 087b29f56d..788c3be7a7 100644 --- a/downstream/titles/eda/eda-user-guide/master.adoc +++ b/downstream/titles/eda/eda-user-guide/master.adoc @@ -6,16 +6,24 @@ include::attributes/attributes.adoc[] // Book Title -= Event-Driven Ansible controller user guide += Using automation decisions -{EDAcontroller} is a new way to enhance and expand automation by improving IT speed and agility while enabling consistency and resilience. +{EDAcontroller} is a new way to enhance and expand automation by improving IT speed and agility while enabling consistency and resilience. Developed by Red Hat, this feature is designed for simplicity and flexibility. 
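Editor's note: each book in this patch is assembled entirely from the `include::` directives that follow, with `leveloffset=+1` demoting every included assembly one heading level beneath the book title. A quick way to smoke-test a restructured title such as `eda-user-guide` is a local Asciidoctor render, which surfaces broken `include::` paths or dangling symlinks early. This is a minimal sketch assuming the `asciidoctor` CLI is installed; it is not the downstream publication toolchain, which is not part of this change.

[source,bash]
----
#!/bin/bash
# Minimal local smoke test for one restructured title (illustrative only;
# assumes asciidoctor is installed, not the real downstream build pipeline).
set -euo pipefail

title=downstream/titles/eda/eda-user-guide

# Render to a throwaway HTML file. --failure-level=WARN makes unresolved
# include:: targets and missing symlinked directories fail the run instead
# of only printing warnings.
asciidoctor --safe-mode unsafe --failure-level=WARN \
  -o /tmp/eda-user-guide.html "$title/master.adoc"
----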
include::{Boilerplate}[] include::eda/assembly-eda-user-guide-overview.adoc[leveloffset=+1] include::eda/assembly-eda-credentials.adoc[leveloffset=+1] +include::eda/assembly-eda-credential-types.adoc[leveloffset=+1] + include::eda/assembly-eda-projects.adoc[leveloffset=+1] include::eda/assembly-eda-decision-environments.adoc[leveloffset=+1] -include::eda/assembly-eda-set-up-token.adoc[leveloffset=+1] +include::eda/assembly-simplified-event-routing.adoc[leveloffset=+1] +include::eda/assembly-eda-set-up-rhaap-credential.adoc[leveloffset=+1] +//include::eda/assembly-eda-set-up-token.adoc[leveloffset=+1] include::eda/assembly-eda-rulebook-activations.adoc[leveloffset=+1] +include::eda/assembly-eda-rulebook-troubleshooting.adoc[leveloffset=+1] include::eda/assembly-eda-rule-audit.adoc[leveloffset=+1] +include::eda/assembly-eda-performance-tuning.adoc[leveloffset=+1] +include::eda/assembly-eda-event-filter-plugins.adoc[leveloffset=+1] +include::eda/assembly-eda-logging-strategy.adoc[leveloffset=+1] diff --git a/downstream/titles/controller/controller-getting-started/aap-common b/downstream/titles/edge-manager/edge-manager-user-guide/aap-common similarity index 100% rename from downstream/titles/controller/controller-getting-started/aap-common rename to downstream/titles/edge-manager/edge-manager-user-guide/aap-common diff --git a/downstream/titles/analytics/automation-savings-planner/attributes b/downstream/titles/edge-manager/edge-manager-user-guide/attributes similarity index 100% rename from downstream/titles/analytics/automation-savings-planner/attributes rename to downstream/titles/edge-manager/edge-manager-user-guide/attributes diff --git a/downstream/titles/aap-plugin-rhdh/docinfo.xml b/downstream/titles/edge-manager/edge-manager-user-guide/docinfo.xml similarity index 55% rename from downstream/titles/aap-plugin-rhdh/docinfo.xml rename to downstream/titles/edge-manager/edge-manager-user-guide/docinfo.xml index f3f147268e..c2787ca9f7 100644 --- a/downstream/titles/aap-plugin-rhdh/docinfo.xml +++ b/downstream/titles/edge-manager/edge-manager-user-guide/docinfo.xml @@ -1,9 +1,9 @@ -Ansible plug-ins for Red Hat Developer Hub + Red Hat Edge Manager Red Hat Ansible Automation Platform 2.5 -Install and use Ansible plug-ins for Red Hat Developer Hub +Enable Red Hat Edge Manager - This guide describes how to install and use Ansible plug-ins for Red Hat Developer Hub. + This guide shows you how to install, configure, and use {RedHatEdge} to manage individual devices and fleets of devices. Red Hat Customer Content Services diff --git a/downstream/titles/analytics/automation-savings-planner/images b/downstream/titles/edge-manager/edge-manager-user-guide/images similarity index 100% rename from downstream/titles/analytics/automation-savings-planner/images rename to downstream/titles/edge-manager/edge-manager-user-guide/images diff --git a/downstream/titles/edge-manager/edge-manager-user-guide/master.adoc b/downstream/titles/edge-manager/edge-manager-user-guide/master.adoc new file mode 100644 index 0000000000..47675d1b62 --- /dev/null +++ b/downstream/titles/edge-manager/edge-manager-user-guide/master.adoc @@ -0,0 +1,20 @@ +:imagesdir: images +:numbered: +:toclevels: 1 +:experimental: + +include::attributes/attributes.adoc[] + +// Book Title += Enabling Red Hat Edge Manager + +{RedHatEdge} aims to provide simple, scalable, and secure management of edge devices and applications.
+You can declare the operating system version, host configuration, and set of applications that you want to run on an individual device or a whole fleet of devices. +{RedHatEdge} rolls out the target configuration to devices, where a device agent automatically applies it and reports progress and health status back up. + +include::{Boilerplate}[] + +include::platform/assembly-edge-manager-manage-devices.adoc[leveloffset=+1] +include::platform/assembly-edge-manager-install.adoc[leveloffset=+1] +include::platform/assembly-edge-manager-config.adoc[leveloffset=+1] +include::platform/assembly-edge-manager-troubleshooting.adoc[leveloffset=+1] diff --git a/downstream/titles/controller/controller-getting-started/platform b/downstream/titles/edge-manager/edge-manager-user-guide/platform similarity index 100% rename from downstream/titles/controller/controller-getting-started/platform rename to downstream/titles/edge-manager/edge-manager-user-guide/platform diff --git a/downstream/titles/getting-started/aap-common b/downstream/titles/getting-started/aap-common new file mode 120000 index 0000000000..472eeb4dac --- /dev/null +++ b/downstream/titles/getting-started/aap-common @@ -0,0 +1 @@ +../../aap-common \ No newline at end of file diff --git a/downstream/titles/getting-started/attributes b/downstream/titles/getting-started/attributes new file mode 120000 index 0000000000..a5caaa73a5 --- /dev/null +++ b/downstream/titles/getting-started/attributes @@ -0,0 +1 @@ +../../attributes \ No newline at end of file diff --git a/downstream/titles/getting-started/docinfo.xml b/downstream/titles/getting-started/docinfo.xml new file mode 100644 index 0000000000..e1b8436be6 --- /dev/null +++ b/downstream/titles/getting-started/docinfo.xml @@ -0,0 +1,11 @@ +Getting started with Ansible Automation Platform +Red Hat Ansible Automation Platform +2.5 +Get started with Ansible Automation Platform + + This guide shows how to get started with Ansible Automation Platform. + + + Red Hat Customer Content Services + + diff --git a/downstream/titles/getting-started/eda b/downstream/titles/getting-started/eda new file mode 120000 index 0000000000..4f3e9af334 --- /dev/null +++ b/downstream/titles/getting-started/eda @@ -0,0 +1 @@ +../../assemblies/eda \ No newline at end of file diff --git a/downstream/titles/getting-started/images b/downstream/titles/getting-started/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/downstream/titles/getting-started/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/downstream/titles/getting-started/master.adoc b/downstream/titles/getting-started/master.adoc new file mode 100644 index 0000000000..5f5956a8ca --- /dev/null +++ b/downstream/titles/getting-started/master.adoc @@ -0,0 +1,26 @@ +:imagesdir: images +:numbered: +:toclevels: 1 + +:experimental: + +:controller-GS: + +include::attributes/attributes.adoc[] + + +// Book Title += Getting started with Ansible Automation Platform + +{PlatformName} is a unified automation solution that automates a variety of IT processes, including provisioning, configuration management, application deployment, orchestration, and security and compliance changes (including patching systems). + +{PlatformNameShort} features a platform interface where you can set up centralized authentication, configure access management, and execute automation tasks from a single location.
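Editor's note: the `120000` mode entries throughout this patch are relative symlinks, which is how each title directory borrows the shared `aap-common`, `attributes`, `images`, `snippets`, and assembly trees instead of copying them. Wiring up a new title such as `getting-started` follows the pattern below. This is an illustrative sketch of the convention, using the exact link targets added in this patch; it is not a script from this repository.

[source,bash]
----
#!/bin/bash
# Illustrative sketch: link a new title directory to the shared content
# trees, matching the 120000 (symlink) entries added in this patch.
set -euo pipefail

cd downstream/titles/getting-started

# Targets are relative to the title directory, so the links resolve the
# same way from any checkout location.
ln -s ../../aap-common aap-common
ln -s ../../attributes attributes
ln -s ../../images images
ln -s ../../snippets snippets
ln -s ../../assemblies/eda eda
ln -s ../../assemblies/platform platform
----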
+ +This guide will help you get started with {PlatformNameShort} by introducing three central concepts: automation execution, automation decisions, and automation content. + +include::{Boilerplate}[] + +include::platform/assembly-gs-key-functionality.adoc[leveloffset=+1] +include::platform/assembly-gs-platform-admin.adoc[leveloffset=+1] +include::platform/assembly-gs-auto-dev.adoc[leveloffset=+1] +include::platform/assembly-gs-auto-op.adoc[leveloffset=+1] diff --git a/downstream/titles/getting-started/platform b/downstream/titles/getting-started/platform new file mode 120000 index 0000000000..06b49528ee --- /dev/null +++ b/downstream/titles/getting-started/platform @@ -0,0 +1 @@ +../../assemblies/platform \ No newline at end of file diff --git a/downstream/titles/getting-started/snippets b/downstream/titles/getting-started/snippets new file mode 120000 index 0000000000..7bf6da9a51 --- /dev/null +++ b/downstream/titles/getting-started/snippets @@ -0,0 +1 @@ +../../snippets \ No newline at end of file diff --git a/downstream/titles/hub/getting-started/aap-common b/downstream/titles/hub/getting-started/aap-common deleted file mode 120000 index fa02a580b8..0000000000 --- a/downstream/titles/hub/getting-started/aap-common +++ /dev/null @@ -1 +0,0 @@ -../../../aap-common \ No newline at end of file diff --git a/downstream/titles/hub/getting-started/attributes b/downstream/titles/hub/getting-started/attributes deleted file mode 120000 index 8615cf3107..0000000000 --- a/downstream/titles/hub/getting-started/attributes +++ /dev/null @@ -1 +0,0 @@ -../../../attributes/ \ No newline at end of file diff --git a/downstream/titles/hub/getting-started/hub b/downstream/titles/hub/getting-started/hub deleted file mode 120000 index 8185591f40..0000000000 --- a/downstream/titles/hub/getting-started/hub +++ /dev/null @@ -1 +0,0 @@ -../../../assemblies/hub \ No newline at end of file diff --git a/downstream/titles/hub/managing-content/docinfo.xml b/downstream/titles/hub/managing-content/docinfo.xml index 1c27ab4354..26b6154d33 100644 --- a/downstream/titles/hub/managing-content/docinfo.xml +++ b/downstream/titles/hub/managing-content/docinfo.xml @@ -1,4 +1,4 @@ -Managing content in automation hub +Managing automation content Red Hat Ansible Automation Platform 2.5 Create and manage collections, content and repositories in automation hub diff --git a/downstream/titles/hub/managing-content/master.adoc b/downstream/titles/hub/managing-content/master.adoc index 943dd4c74f..ccc26c42c3 100644 --- a/downstream/titles/hub/managing-content/master.adoc +++ b/downstream/titles/hub/managing-content/master.adoc @@ -4,13 +4,31 @@ :experimental: include::attributes/attributes.adoc[] -= Managing content in automation hub += Managing automation content include::{Boilerplate}[] include::hub/assembly-managing-cert-valid-content.adoc[leveloffset=+1] +include::hub/assembly-syncing-to-cloud-repo.adoc[leveloffset=+2] +include::hub/assembly-synclists.adoc[leveloffset=+2] +include::hub/assembly-collections-and-content-signing-in-pah.adoc[leveloffset=+2] +//include::hub/assembly-faq.adoc[leveloffset=+2] +include::hub/assembly-validated-content.adoc[leveloffset=+2] include::hub/assembly-managing-collections-hub.adoc[leveloffset=+1] +include::hub/assembly-working-with-namespaces.adoc[leveloffset=+2] +include::hub/assembly-managing-private-collections.adoc[leveloffset=+2] +include::hub/assembly-repo-management.adoc[leveloffset=+2] +include::hub/assembly-remote-management.adoc[leveloffset=+2] 
+include::hub/assembly-repo-sync.adoc[leveloffset=+2] +include::hub/assembly-collection-import-export.adoc[leveloffset=+2] include::hub/assembly-managing-containers-hub.adoc[leveloffset=+1] +include::hub/assembly-managing-container-registry.adoc[leveloffset=+2] +include::hub/assembly-container-user-access.adoc[leveloffset=+2] +include::hub/assembly-populate-container-registry.adoc[leveloffset=+2] +include::hub/assembly-setup-container-repository.adoc[leveloffset=+2] +include::hub/assembly-pull-image.adoc[leveloffset=+2] +include::hub/assembly-working-with-signed-containers.adoc[leveloffset=+2] +include::hub/assembly-delete-container.adoc[leveloffset=+2] diff --git a/downstream/titles/navigator-guide/docinfo.xml b/downstream/titles/navigator-guide/docinfo.xml index 90c42afc4e..4d80c41c38 100644 --- a/downstream/titles/navigator-guide/docinfo.xml +++ b/downstream/titles/navigator-guide/docinfo.xml @@ -1,4 +1,4 @@ -Automation content navigator creator guide +Using content navigator Red Hat Ansible Automation Platform 2.5 Develop content that is compatible with Ansible Automation Platform diff --git a/downstream/titles/navigator-guide/master.adoc b/downstream/titles/navigator-guide/master.adoc index 7cf601b9b4..e5f85c2fcb 100644 --- a/downstream/titles/navigator-guide/master.adoc +++ b/downstream/titles/navigator-guide/master.adoc @@ -7,7 +7,7 @@ include::attributes/attributes.adoc[] // Book Title -= Automation content navigator creator guide += Using content navigator include::{Boilerplate}[] diff --git a/downstream/titles/ocp_performance_guide/docinfo.xml b/downstream/titles/ocp_performance_guide/docinfo.xml index 9f9f964eae..0771bd481b 100644 --- a/downstream/titles/ocp_performance_guide/docinfo.xml +++ b/downstream/titles/ocp_performance_guide/docinfo.xml @@ -1,4 +1,4 @@ -Red Hat Ansible Automation Platform performance considerations for operator based installations +Performance considerations for operator environments Red Hat Ansible Automation Platform 2.5 diff --git a/downstream/titles/ocp_performance_guide/master.adoc b/downstream/titles/ocp_performance_guide/master.adoc index 4cdc6cdc3b..f6a84bda3d 100644 --- a/downstream/titles/ocp_performance_guide/master.adoc +++ b/downstream/titles/ocp_performance_guide/master.adoc @@ -8,7 +8,7 @@ include::attributes/attributes.adoc[] :context: ocp-performance // Book Title -= Red Hat Ansible Automation Platform performance considerations for operator based installations += Performance considerations for operator environments Deploying applications to a container orchestration platform such as {OCP} provides a number of advantages from an operational perspective. For example, an update to the base image of an application can be made through a simple in-place upgrade with little to no disruption. 
diff --git a/downstream/titles/operator-mesh/docinfo.xml b/downstream/titles/operator-mesh/docinfo.xml index 9e7f41bea0..d7f34fed3b 100644 --- a/downstream/titles/operator-mesh/docinfo.xml +++ b/downstream/titles/operator-mesh/docinfo.xml @@ -1,4 +1,4 @@ -Red Hat Ansible Automation Platform automation mesh for operator-based installations +Automation mesh for managed cloud or operator environments Red Hat Ansible Automation Platform 2.5 Automate at scale in a cloud-native way diff --git a/downstream/titles/operator-mesh/master.adoc b/downstream/titles/operator-mesh/master.adoc index aefdfd47a8..28c57b522c 100644 --- a/downstream/titles/operator-mesh/master.adoc +++ b/downstream/titles/operator-mesh/master.adoc @@ -9,7 +9,7 @@ include::attributes/attributes.adoc[] // Book Title -= Red Hat Ansible Automation Platform automation mesh for operator-based installations += Automation mesh for managed cloud or operator environments Thank you for your interest in {PlatformName}. {PlatformNameShort} is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments. diff --git a/downstream/titles/playbooks/playbooks-getting-started/docinfo.xml b/downstream/titles/playbooks/playbooks-getting-started/docinfo.xml index c6bc31a01b..5dc012241f 100644 --- a/downstream/titles/playbooks/playbooks-getting-started/docinfo.xml +++ b/downstream/titles/playbooks/playbooks-getting-started/docinfo.xml @@ -1,9 +1,10 @@ -Getting started with Ansible Playbooks +Getting started with playbooks Red Hat Ansible Automation Platform 2.5 -Getting started with ansible playbooks +Get started with Ansible Playbooks - Learn how to set up an ansible playbook. + This guide shows how to create and use playbooks to address your automation requirements. + This document includes content from the upstream docs.ansible.com documentation, which is covered by the GNU GENERAL PUBLIC LICENSE v3.0. Red Hat Customer Content Services diff --git a/downstream/titles/playbooks/playbooks-getting-started/master.adoc b/downstream/titles/playbooks/playbooks-getting-started/master.adoc index a98fa51619..8a71e75144 100644 --- a/downstream/titles/playbooks/playbooks-getting-started/master.adoc +++ b/downstream/titles/playbooks/playbooks-getting-started/master.adoc @@ -8,7 +8,7 @@ include::attributes/attributes.adoc[] // Book Title -= Getting started with Ansible Playbooks += Getting started with playbooks Thank you for your interest in {PlatformName}. {PlatformNameShort} is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments. @@ -16,10 +16,7 @@ This guide provides an introduction to the use of Ansible Playbooks.. 
include::{Boilerplate}[] -//include::playbooks/assembly-playbook-gs.adoc[leveloffset=+1] include::playbooks/assembly-intro-to-playbooks.adoc[leveloffset=+1] include::playbooks/assembly-networking-playbook.adoc[leveloffset=+1] include::playbooks/assembly-playbook-practical-example.adoc[leveloffset=+1] - - - +include::playbooks/assembly-open-source-license.adoc[leveloffset=+1] diff --git a/downstream/titles/playbooks/playbooks-reference/docinfo.xml b/downstream/titles/playbooks/playbooks-reference/docinfo.xml index 34e0e320f9..58966318a4 100644 --- a/downstream/titles/playbooks/playbooks-reference/docinfo.xml +++ b/downstream/titles/playbooks/playbooks-reference/docinfo.xml @@ -1,11 +1,11 @@ -Reference Guide to Ansible Playbooks +Reference guide to Ansible Playbooks Red Hat Ansible Automation Platform 2.5 -Reference Guide to Ansible Playbooks +Learn about the different approaches for creating playbooks This guide provides a reference for the differing approaches to the creating of Ansible playbooks. Red Hat Customer Content Services - \ No newline at end of file + diff --git a/downstream/titles/playbooks/playbooks-reference/master.adoc b/downstream/titles/playbooks/playbooks-reference/master.adoc index 40f9d25164..251e74881b 100644 --- a/downstream/titles/playbooks/playbooks-reference/master.adoc +++ b/downstream/titles/playbooks/playbooks-reference/master.adoc @@ -8,7 +8,7 @@ include::attributes/attributes.adoc[] // Book Title -= Reference Guide for Ansible Playbooks += Reference guide to Ansible Playbooks Thank you for your interest in {PlatformName}. {PlatformNameShort} is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments. @@ -16,4 +16,4 @@ This guide provides a reference for the differing approaches to the creating of include::{Boilerplate}[] -include::playbooks/assembly-reference-test.adoc[leveloffset=+1] \ No newline at end of file +include::playbooks/assembly-reference-test.adoc[leveloffset=+1] diff --git a/downstream/titles/release-notes/async/aap-25-1-7-oct.adoc b/downstream/titles/release-notes/async/aap-25-1-7-oct.adoc new file mode 100644 index 0000000000..17443ba194 --- /dev/null +++ b/downstream/titles/release-notes/async/aap-25-1-7-oct.adoc @@ -0,0 +1,48 @@ +//This is the working version of the patch release notes document. + +[[aap-25-1-7-oct]] + + += {PlatformNameShort} patch release October 7, 2024 + +The following enhancements and fixes have been implemented in this release of {PlatformName}. + +== Enhancements + +* {EDAName} workers and scheduler add timeout and retry resilience when communicating with a Redis cluster. (AAP-32139) + +* Removed the *MTLS* credential type that was incorrectly added. (AAP-31848) + +== Fixed issues + +=== {PlatformNameShort} + +* Fixed a conditional that was skipping necessary tasks in the restore role, which was causing restores to not finish reconciling. (AAP-30437) + +* Systemd services in the containerized installer now have their restart policy set to *always* by default. (AAP-31824) + +* *FLUSHDB* is now modified to account for shared usage of a Redis database. It now respects access limitations by removing only those keys that the client has permissions to. (AAP-32138) + +* Added a fix to ensure default *extra_vars* values are rendered in the *Prompt on launch* wizard. (AAP-30585) + +* Filtered out the unused *ANSIBLE_BASE_* settings from the environment variable in job execution.
(AAP-32208) + + +=== {EDAName} + +* Configured the setting *EVENT_STREAM_MTLS_BASE_URL* to the correct default to ensure MTLS is disallowed in the RPM installer. (AAP-32027) + +* Configured the setting *EVENT_STREAM_MTLS_BASE_URL* to the correct default to ensure MTLS is disallowed in the containerized installer. (AAP-31851) + +* Fixed a bug where the {EDAName} workers and scheduler were unable to reconnect to the Redis cluster if a primary Redis node entered a *failed* state and a new primary node was promoted. See the KCS article link:https://access.redhat.com/articles/7088545[Redis failover causes {EDAName} activation failures], which includes the steps that were necessary before this bug was fixed. (AAP-30722) + +== Advisories +The following errata advisories are included in this release: + +* link:https://access.redhat.com/errata/RHBA-2024:7756[RHBA-2024:7756 - Product Release Update] + +* link:https://access.redhat.com/errata/RHBA-2024:7760[RHBA-2024:7760 - Container Release Update] + +* link:https://access.redhat.com/errata/RHBA-2024:7766[RHBA-2024:7766 - Cluster Scoped Container Release Update] + +* link:https://access.redhat.com/errata/RHBA-2024:7810[RHBA-2024:7810 - Setup Bundle Release Update] diff --git a/downstream/titles/release-notes/async/aap-25-12-18-dec.adoc b/downstream/titles/release-notes/async/aap-25-12-18-dec.adoc new file mode 100644 index 0000000000..0080d26276 --- /dev/null +++ b/downstream/titles/release-notes/async/aap-25-12-18-dec.adoc @@ -0,0 +1,171 @@ +[[aap-25-12-18-dec]] + += {PlatformNameShort} patch release December 18, 2024 + +The following enhancements and bug fixes have been implemented in this release of {PlatformNameShort}. + +== Enhancements + +=== {PlatformNameShort} + +* Added help text to all fields that were missing it in the {PlatformNameShort} gateway and `django-ansible-base`. (AAP-37068) + +* Consistently formatted sentence structure for `help_text`, and provided more context in the help text where it was vague. (AAP-37016) + +* Added dynamic preferences for use by {Analytics}. (AAP-36710) + +** `INSIGHTS_TRACKING_STATE`: Enables the service to gather data on automation and send it to {Analytics}. + +** `RED_HAT_CONSOLE_URL`: This setting is used to configure the upload URL for data collection for {Analytics}. + +** `REDHAT_USERNAME`: Username used to send data to {Analytics}. + +** `REDHAT_PASSWORD`: Password for the account used to send data to {Analytics}. + +** `SUBSCRIPTIONS_USERNAME`: Username used to retrieve subscription and content information. + +** `SUBSCRIPTIONS_PASSWORD`: Password used to retrieve subscription and content information. + +** `AUTOMATION_ANALYTICS_GATHER_INTERVAL`: Interval in seconds at which {Analytics} gathers data. + +* Added an enabled flag for turning authenticator maps on or off. (AAP-36709) + +* `aap-metrics-utility` has been updated to 0.4.1. (AAP-36393) + +* Added the setting `trusted_header_timeout_in_ns` to timegate `X_TRUSTED_PROXY_HEADER` validation in the `django-ansible-base` libraries used by {PlatformNameShort} components. (AAP-36712) + + +=== Documentation updates + +* With this update, the {OperatorPlatformNameShort} growth topology and {OperatorPlatformNameShort} enterprise topology have been updated to include s390x (IBM Z) architecture test support. + + +=== {EDAName} + +* Extended the scope of the `log_level` and debug settings. (AAP-33669) + +* A project can now be synced with the {EDAName} collection modules.
(AAP-32264)
+
+* In the Rulebook activation create form, selecting a project is now required before selecting a rulebook.(AAP-28082)
+
+* The btn:[Create credentials] button is now visible regardless of whether any credentials already exist.(AAP-23707)
+
+
+== Bug fixes
+
+=== General
+
+* Fixed an issue where the `django-ansible-base` fallback cache kept creating a *tmp* file even if the *LOCATION* was set to another path.(AAP-36869)
+
+* Fixed an issue where the OIDC authenticator was not allowed to use the JSON key to extract user groups, or to modify a user, through the new `GROUPS_CLAIM` configuration setting.(AAP-36716)
+
+
+With this update, the following CVEs have been addressed:
+
+* link:https://access.redhat.com/security/cve/cve-2024-11079[CVE-2024-11079] `ansible-core`: Unsafe Tagging Bypass via `hostvars` Object in Ansible-Core.(AAP-35563)
+
+* link:https://access.redhat.com/security/cve/cve-2024-53908[CVE-2024-53908] `ansible-lightspeed-container`: Potential SQL injection in `HasKey(lhs, rhs)` on Oracle.(AAP-36767)
+
+* link:https://access.redhat.com/security/cve/cve-2024-53907[CVE-2024-53907] `ansible-lightspeed-container`: Potential denial-of-service in `django.utils.html.strip_tags()`.(AAP-36755)
+
+* link:https://access.redhat.com/security/cve/cve-2024-11483[CVE-2024-11483], which allowed users to escape the scope of their personal access *OAuth2* tokens from read-scoped to read-write-scoped in the gateway.(AAP-36261)
+
+
+=== {PlatformName}
+
+* Fixed an issue where queries of role user assignments in the platform UI succeeded only about 75% of the time.(AAP-36872)
+
+* Fixed an issue where the user was unable to filter job templates by *label* in {PlatformNameShort} 2.5.(AAP-36540)
+
+* Fixed an issue where it was not possible to open a job template after removing the user that created the template.(AAP-35820)
+
+* Fixed an issue where the inventory source update failed, and did not allow selection of the inventory file.(AAP-35246)
+
+* Fixed an issue where the *Login Redirect Override* setting was missing and not functioning as expected in {PlatformNameShort} 2.5.(AAP-33295)
+
+* Fixed an issue where users were able to select a credential that required a password when defining a schedule.(AAP-32821)
+
+* Fixed an issue where the job output did not show unless you switched tabs. This also fixed other display issues.(AAP-31125)
+
+* Fixed an issue where adding a new Automation Decision role to a team did not work from the {MenuAMTeams} navigation path.(AAP-31873)
+
+* Fixed an issue where migration was missing from {PlatformNameShort}.(AAP-37015)
+
+* Fixed an issue where the gateway *OAuth* token was not encrypted at rest.(AAP-36715)
+
+* Fixed an issue where the API forced the user to save a service with an API port even if one did not exist.(AAP-36714)
+
+* Fixed an issue where the Gateway did not properly interpret SAML attributes for mappings.(AAP-36713)
+
+* Fixed an issue where non-self-signed *certificate+key* pairs were allowed to be used in SAML authenticator configurations.(AAP-36707)
+
+* Fixed an issue where the login page was not redirecting to `/api/gateway/v1` if a user was already logged in.(AAP-36638)
+
+
+=== {HubNameMain}
+
+* When configuring an *Ansible Remote* to sync collections from other servers, a requirements file is only required for syncs from Galaxy, and optional otherwise.
Without a requirements file, all collections are synced.(AAP-31238)
+
+
+=== Container-based {PlatformNameShort}
+
+* Fixed an issue that allowed {ControllerName} nodes to override the `receptor_peers` variable. (AAP-37085)
+
+* Fixed an issue where the containerized installer ignored `receptor_type` for {ControllerName} hosts and always installed them as hybrid.(AAP-37012)
+
+* Fixed an issue where Podman was not present in the task container, and the cleanup image task failed.(AAP-37011)
+
+* Fixed an issue where only one {ControllerName} node was configured with Execution/Hop node peers rather than all {ControllerName} nodes.(AAP-36851)
+
+* Fixed an issue where, when the {ControllerName} services lost connection to the database, the containers were stopped and the `systemd` unit did not try to restart them.(AAP-36850)
+
+* Fixed an issue where `receptor_type` and `receptor_protocol` variable validation checks were skipped during the preflight role execution.(AAP-36857)
+
+
+=== {EDAName}
+
+* Fixed an issue where the URL field of the event stream was not updated if the `EDA_EVENT_STREAM_BASE_URL` setting changed. (AAP-33819)
+
+* Fixed an issue where {EDAName} and {ControllerName} fields were pre-populated with gateway credentials when `secret: true` was set on custom credentials.(AAP-33188)
+
+* Fixed an issue where the bulk removal of selected role permissions disappeared when more than four permissions were selected.(AAP-28030)
+
+* Fixed an issue where *Enabled options* had its own scrollbar on the *Rulebook Activation Details* page.(AAP-31130)
+
+* Fixed an issue where the status of an activation was occasionally inconsistent with the status of the latest instance after a restart.(AAP-29755)
+
+* Fixed an issue where importing a project from a non-existing branch resulted in a completed state instead of a failed status.(AAP-29144)
+
+* Fixed an issue with custom credential types where, if the user clicked *Generate extra vars* before the `fields:` key in the input configuration was complete, it would create an empty line that was uneditable.(AAP-28084)
+
+* Fixed an issue where the project sync would not fail on an empty or unstructured git repository.(AAP-35777)
+
+* Fixed an issue where rulebook validation on import/sync failed when a rulebook had a duplicated rule name.(AAP-35164)
+
+* Fixed an issue where the {EDAName} API allowed a credential's type to be changed.(AAP-34968)
+
+* Fixed an issue where a previously failed project could be accidentally changed to *completed* after a resync.(AAP-34744)
+
+* Fixed an issue where no message was recorded when a project did not contain any rulebooks.(AAP-34555)
+
+* Fixed an issue where the name for credentials in the rulebook activation form field was not updated.(AAP-34123)
+
+* Updated the message for the rulebook activation/event streams for better clarity.(AAP-33485)
+
+* Fixed an issue where the source plugin was not able to use the `env vars` to establish a successful connection to the remote source.(AAP-35597)
+
+* Fixed an issue in the collection where the activation module failed with a misleading error message if the rulebook, project, decision environment, or organization could not be found.(AAP-35360)
+
+* Fixed an issue where the validation of a host specified as part of a container registry credential did not conform to container registry standards. Previously, the specified host could be a syntactically invalid host (name or net address) with an optional port value `([:])`.
The validation is now applied when creating a credential as well as when modifying an existing credential, regardless of which fields are being modified.(AAP-34969)
+
+* Fixed an issue whereby multiple {PlatformName} credentials were being attached to activations.(AAP-34025)
+
+* Fixed an issue where there was an erroneous dependency on the existence of an organization named *Default*.(AAP-33551)
+
+* Fixed an issue where an activation was occasionally reported as running before it was ready to receive events.(AAP-31225)
+
+* Fixed an issue where the user could not edit auto-generated *injector vars* while creating {EDAName} custom credentials.(AAP-29752)
+
+* Fixed an issue where in some cases the `file_watch` source plugin in an {EDAName} collection raised the *QueueFull* exception.(AAP-29139)
+
+* Fixed an issue where the {EDAName} database increased in size continuously, even if the database was unused. Added the `purge_record` script to clean up outdated database records.(AAP-30684)
diff --git a/downstream/titles/release-notes/async/aap-25-2-14-oct.adoc b/downstream/titles/release-notes/async/aap-25-2-14-oct.adoc
new file mode 100644
index 0000000000..f388c316f1
--- /dev/null
+++ b/downstream/titles/release-notes/async/aap-25-2-14-oct.adoc
@@ -0,0 +1,39 @@
+[[aap-25-2-14-oct]]
+
+= {PlatformNameShort} patch release October 14, 2024
+
+The following fixes have been implemented in this release of {PlatformName}.
+
+== Fixed issues
+
+=== {PlatformNameShort}
+
+* Fixed an issue in {Gateway} where examining output logs for UWSGI showed a message that could be viewed as insensitive. (AAP-33213)
+
+* Fixed an external Redis port configuration issue, which resulted in a `cluster_host` error when trying to connect to Redis. (AAP-32691)
+
+* Fixed a faulty conditional which was causing managed Redis to be deployed even if an external Redis was being configured. (AAP-31607)
+
+* After the initial deployment of {PlatformNameShort}, if you make changes to the {ControllerName}, {HubName}, or {EDAName} sections of the {PlatformNameShort} CR specification, those changes are now propagated to the component custom resources. (AAP-32350)
+
+* Fixed an issue where, when the `keep_keys` filter was used, all keys were removed from the dictionary. The `keep_keys` fix is available in the updated `ansible.utils` collection. (AAP-32960)
+
+* Fixed an issue in `cisco.ios.ios_static_routes` where the metric distance was incorrectly populated in the `forward_router_address` attribute. (AAP-32960)
+
+* Fixed an issue where {OperatorPlatformNameShort} was not transferring metric settings to the controller. (AAP-32073)
+
+* Fixed an issue where, if you had a schedule on a resource (such as a job template) that prompted for credentials and you updated the credential to be different from the resource default, the new credential was not submitted to the API and did not get updated. (AAP-31957)
+
+* Fixed an issue where setting `pg_host=` without any other context resulted in an empty HOST section of `settings.py` in controller.
(AAP-32440)
+
+== Advisories
+The following errata advisories are included in this release:
+
+* link:https://access.redhat.com/errata/RHBA-2024:8079[RHBA-2024:8079 - Product Release Update]
+
+* link:https://access.redhat.com/errata/RHBA-2024:8084[RHBA-2024:8084 - Container Release Update]
+
+* link:https://access.redhat.com/errata/RHBA-2024:8096[RHBA-2024:8096 - Cluster Scoped Container Release Update]
+
+* link:https://access.redhat.com/errata/RHBA-2024:8141[RHBA-2024:8141 - Setup Bundle Release Update]
+
diff --git a/downstream/titles/release-notes/async/aap-25-20250115.adoc b/downstream/titles/release-notes/async/aap-25-20250115.adoc
new file mode 100644
index 0000000000..652a3ea45d
--- /dev/null
+++ b/downstream/titles/release-notes/async/aap-25-20250115.adoc
@@ -0,0 +1,110 @@
+[[aap-25-20250115]]
+
+= {PlatformNameShort} patch release January 15, 2025
+
+The following enhancements and bug fixes have been implemented in this release of {PlatformNameShort}.
+
+== Enhancements
+
+=== {PlatformNameShort}
+
+* With this update, the `ansible.controller` collection has been updated to 4.6.6.(AAP-38443)
+
+* Enhanced the *status API*, `/api/gateway/v1/status/`, by changing the *services* property within the JSON response to an array. Consumers of this API can still request the previous format with the URL query parameter `service_keys=true`.(AAP-37903)
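+
+A minimal sketch of the two formats, assuming a reachable gateway (the hostname and any required authentication are placeholders):
+
+[source,bash]
+----
+# Default format: the "services" property is returned as an array.
+curl -s https://aap.example.com/api/gateway/v1/status/
+
+# Request the previous keyed format instead.
+curl -s "https://aap.example.com/api/gateway/v1/status/?service_keys=true"
+----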
+
+
+=== {OperatorPlatformNameShort}
+
+* Added the ability to configure `topology_spread_constraints`, `node_selector`, and `tolerations` for gateway deployments. (AAP-37193)
+
+=== Container-based {PlatformNameShort}
+
+* TLS certificate and key files are now validated during the preflight role execution.
+
+** If the TLS certificate file is provided then the TLS key file must be provided.
+
+** If the TLS key file is provided then the TLS certificate file must be provided.
+
+** Both the TLS certificate and key modulus must match.(AAP-37845)
+
+
+== Bug fixes
+
+=== CVE
+
+With this update, the following CVEs have been addressed:
+
+* link:https://access.redhat.com/security/cve/cve-2024-52304[CVE-2024-52304] `python3.11-aiohttp`: `aiohttp` vulnerable to request smuggling due to incorrect parsing of chunk extensions.(AAP-36192)
+
+* link:https://access.redhat.com/security/cve/cve-2024-55565[CVE-2024-55565] `automation-gateway`: `nanoid` mishandles non-integer values.(AAP-37168)
+
+* link:https://access.redhat.com/security/cve/cve-2024-53908[CVE-2024-53908] `automation-controller`: Potential SQL injection in `HasKey(lhs, rhs)` on Oracle.(AAP-36769)
+
+* link:https://access.redhat.com/security/cve/cve-2024-53907[CVE-2024-53907] `automation-controller`: Potential denial-of-service in `django.utils.html.strip_tags()`.(AAP-36756)
+
+* link:https://access.redhat.com/security/cve/cve-2024-11407[CVE-2024-11407] `automation-controller`: Denial-of-service through data corruption in `gRPC-C++`.(AAP-36745)
+
+* link:https://access.redhat.com/security/cve/cve-2024-52304[CVE-2024-52304] `ansible-lightspeed-container`: `aiohttp` vulnerable to request smuggling due to incorrect parsing of chunk extensions.(AAP-36185)
+
+* link:https://access.redhat.com/security/cve/cve-2024-56201[CVE-2024-56201] `ansible-lightspeed-container`: Jinja has a sandbox breakout through malicious filenames.(AAP-38079)
+
+* link:https://access.redhat.com/security/cve/cve-2024-56326[CVE-2024-56326] `ansible-lightspeed-container`: Jinja has a sandbox breakout through indirect reference to format method.(AAP-38056)
+
+* link:https://access.redhat.com/security/cve/cve-2024-11407[CVE-2024-11407] `ansible-lightspeed-container`: Denial-of-service through data corruption in `gRPC-C++`.(AAP-36744)
+
+
+=== {PlatformName}
+
+* Fixed a *not found* error that occurred occasionally when navigating through the form wizards.(AAP-37495)
+
+* Fixed an issue where installing `ansible-core` no longer installed `python3-jmespath` on {RHEL} 8.(AAP-18251)
+
+* Fixed an issue where the `ID_KEY` attribute was improperly used to determine the username field in social auth pipelines.(AAP-38300)
+
+* Fixed an issue where the `X-DAB-JW-TOKEN` header message would flood logs.(AAP-38169)
+
+* Fixed an issue where an authenticator could create a *userid* and return a non-viable *authenticator_uid*.(AAP-38021)
+
+* Fixed an issue where a private key was displayed in plain text when downloading the OpenAPI schema file.(AAP-37843)
+
+[NOTE]
+====
+This was not the private key used by gateway, but a random default key.
+====
+
+=== {ControllerNameStart}
+
+* Fixed an issue that did not allow sending `job_lifecycle` logs to external aggregators.(AAP-37537)
+
+* Fixed an issue where there was a date comparison mismatch for a traceback from the `host_metric_summary_monthly` task.(AAP-37487)
+
+* Fixed an issue where scheduled jobs with a count set to a *non-zero* value would run unexpectedly. (AAP-37290)
+
+* Fixed an issue where a project's `requirements.yml` could revert to a prior state in a cluster. (AAP-37228)
+
+* Fixed an issue where there would be an occasional error creating the event partition table before starting a job, when a large number of jobs were launched quickly. (AAP-37227)
+
+* Fixed an issue where temporary receptor files were not cleaned up after a job completed on nodes.
(AAP-36904)
+
+* Fixed an issue where *POST* to `/api/controller/login/` via the gateway resulted in a fatal response.(AAP-33911)
+
+* Fixed an issue where, when a job template was launched, the named URL returned a *404* error code.(AAP-37025)
+
+
+=== Container-based {PlatformNameShort}
+
+* Fixed an issue where the receptor TLS certificate content was not validated during the preflight role execution to ensure that the *x509 Subject Alt Name* (SAN) field contains the required ISO Object Identifier (OID) 1.3.6.1.4.1.2312.19.1. (AAP-37880)
+
+* Fixed an issue where the *Postgresql SSL* mode variables for controller, {EDAName}, gateway, and {HubName} were not validated during the preflight role execution. (AAP-37352)
+
+* Fixed an issue where the {PlatformNameShort} containerized setup installation would upload collections when inventory growth was used in the AIO installation.(AAP-38372)
+
+* Fixed an issue where the throttle capacity of controller in an AIO installation would allow for performance degradation.(AAP-38207)
+
+
+=== RPM-based {PlatformNameShort}
+
+* Fixed an issue where adding a new {HubName} host to an upgraded environment caused the installation to fail. (AAP-38204)
+
+* Fixed an issue where the link to the documents in the installer *README.md* was broken. (AAP-37627)
+
+* Fixed an issue where the Gateway API status on the {EDAName} proxy component returned *404* errors. (AAP-32816)
diff --git a/downstream/titles/release-notes/async/aap-25-20250122.adoc b/downstream/titles/release-notes/async/aap-25-20250122.adoc
new file mode 100644
index 0000000000..e0260a5685
--- /dev/null
+++ b/downstream/titles/release-notes/async/aap-25-20250122.adoc
@@ -0,0 +1,23 @@
+[[aap-25-20250122]]
+
+= {PlatformNameShort} patch release January 22, 2025
+
+The following enhancements and bug fixes have been implemented in this release of {PlatformNameShort}.
+
+== Enhancements
+
+=== {PlatformNameShort}
+
+* Legacy *Auth SSO URL* settings can now be customized, if needed, for gateway, controller, and hub through overrides passed on the {PlatformNameShort} CR. This is mainly useful if you are using a custom ingress controller.(AAP-37364)
+
+
+== Bug fixes
+
+=== {PlatformNameShort}
+
+* Fixed an issue where there was a `service_id` mismatch between gateway and {EDAName}, which was causing activation rulebooks to fail.(AAP-38172)
+
+[NOTE]
+====
+This fix applies to {OCPShort} only.
+====
diff --git a/downstream/titles/release-notes/async/aap-25-20250129.adoc b/downstream/titles/release-notes/async/aap-25-20250129.adoc
new file mode 100644
index 0000000000..36d29cddf6
--- /dev/null
+++ b/downstream/titles/release-notes/async/aap-25-20250129.adoc
@@ -0,0 +1,84 @@
+[[aap-25-20250129]]
+
+= {PlatformNameShort} patch release January 29, 2025
+
+The following enhancements and bug fixes have been implemented in this release of {PlatformNameShort}.
+
+== Enhancements
+
+=== {PlatformNameShort}
+
+* Using PostgreSQL TLS certificate authentication with an external database is now available.(AAP-38400)
+
+
+=== {EDAName}
+
+* The `ansible.eda` collection has been updated to 2.3.1.(AAP-39057)
+* Users are now able to create a new {EDAName} credential by copying an existing one.(AAP-39249)
+* Added support for *file* and *env* injectors for credentials.(AAP-39091)
+
+
+=== RPM-based {PlatformNameShort}
+
+* Implemented certificate authentication support (mTLS) for external databases.
+** Postgresql TLS certificate authentication is available for external databases.
+** Postgresql TLS certificate authentication can be turned on/off (off by default for backward compatibility). +** Each component, {ControllerName}, {EDAName}, {Gateway}, and {HubName}, now provides off the shelf (OTS) TLS certificate and key files (mandatory).(AAP-38400) + + +== Bug fixes + +=== CVE + +With this update, the following CVEs have been addressed: + +* link:https://access.redhat.com/security/cve/cve-2024-56326[CVE-2024-56326] `python3.11-jinja2`: Jinja has a sandbox breakout through indirect reference to format method.(AAP-38852) + +* link:https://access.redhat.com/security/cve/CVE-2024-56374[CVE-2024-56374] `ansible-lightspeed-container`: Potential denial-of-service vulnerability in IPv6 validation.(AAP-38647) + +* link:https://access.redhat.com/security/cve/CVE-2024-56374[CVE-2024-56374] `python3.11-django`: potential denial-of-service vulnerability in IPv6 validation.(AAP-38630) + +* link:https://access.redhat.com/security/cve/cve-2024-53907[CVE-2024-53907] `python3.11-django`: Potential denial-of-service in django.utils.html.strip_tags().(AAP-38486) + +* link:https://access.redhat.com/security/cve/cve-2024-56201[CVE-2024-56201] `python3.11-jinja2`: Jinja has a sandbox breakout through malicious filenames.(AAP-38331) + +* link:https://access.redhat.com/security/cve/CVE-2024-56374[CVE-2024-56374] `automation-controller`: Potential denial-of-service vulnerability in IPv6 validation.(AAP-38648) + +* link:https://access.redhat.com/security/cve/cve-2024-56201[CVE-2024-56201] `automation-controller`: Jinja has a sandbox breakout through malicious filenames.(AAP-38081) + +* link:https://access.redhat.com/security/cve/cve-2024-56326[CVE-2024-56326] `automation-controller`: Jinja has a sandbox breakout through indirect reference to format method.(AAP-38058) + + + +=== {ControllerNameStart} + +* Fixed an issue where the order of source inventories was not respected by the collection `ansible.controller`.(AAP-38524) + +* Fixed an issue where an actively running job on an execution node may have had its folder deleted by a system task. This fix addresses some *Failed to JSON parse a line from worker stream* type errors.(AAP-38137) + + + +=== Container-based {PlatformNameShort} + +* The inventory file variable *postgresql_admin_username* is no longer required when using an external database. If you do not have database administrator credentials, you can supply the database credentials for each component in the inventory file instead.(AAP-39077) + + +=== {EDAName} + +* Fixed an issue where the application version in the *openapi* spec was incorrectly set.(AAP-38392) + +* Fixed an issue where activations were not properly updated in some scenarios with a high load of the system. (AAP-38374) + +* Fixed an issue where users were unable to filter *Rule Audits* by rulebook activation name.(AAP-39253) + +* Fixed an issue where the input field of the injector configuration could not be empty.(AAP-39086) + + +=== RPM-based {PlatformNameShort} + +* Fixed an issue where setting `automationedacontroller_max_running_activations` could cause the installer to fail. 
(AAP-38708)
+
+* Fixed an issue where the {Gateway} services were not restarted when a dependency changed.(AAP-38918)
+
+* Fixed an issue where the {Gateway} could not be set up with custom SSL certificates.(AAP-38985)
+
diff --git a/downstream/titles/release-notes/async/aap-25-20250213.adoc b/downstream/titles/release-notes/async/aap-25-20250213.adoc
new file mode 100644
index 0000000000..6d694e9de7
--- /dev/null
+++ b/downstream/titles/release-notes/async/aap-25-20250213.adoc
@@ -0,0 +1,133 @@
+[[aap-25-20250213]]
+
+= {PlatformNameShort} patch release February 13, 2025
+
+This release includes the following components and versions:
+
+[cols="1a,3a", options="header"]
+|===
+| Release Date | Component versions
+
+| February 13, 2025 |
+* {ControllerNameStart} 4.6.8
+* {HubNameStart} 4.10.1
+* {EDAName} 1.1.4
+* Container-based installer {PlatformNameShort} (bundle) 2.5-10
+* Container-based installer {PlatformNameShort} (online) 2.5-10
+* Receptor 1.5.1
+* RPM-based installer {PlatformNameShort} (bundle) 2.5-8.1
+* RPM-based installer {PlatformNameShort} (online) 2.5-8
+
+|===
+
+CSV versions in this release:
+
+* Namespace-scoped Bundle: `aap-operator.v2.5.0-0.1738808953`
+
+* Cluster-scoped Bundle: `aap-operator.v2.5.0-0.1738809624`
+
+The following enhancements and bug fixes have been implemented in this release of {PlatformNameShort}.
+
+
+== New features
+
+=== {PlatformNameShort}
+
+* Keycloak now allows for the configuration of the claim key/name for the field containing a user's group membership returned in the ID token and/or user info data. This can be configured by setting the `GROUPS_CLAIM` configuration value on a per-authenticator plugin basis, as was done for the OIDC plugin.(AAP-38720)
+
+== Enhancements
+
+=== General
+
+* The `ansible.controller` collection has been updated to 4.6.8.(AAP-39848)
+
+* The `ansible.platform` collection has been updated to 2.5.20250213.(AAP-39740)
+
+* The `ansible.eda` collection has been updated to 2.4.0.(AAP-39577)
+
+=== {PlatformNameShort}
+
+* It is now possible to configure {HubName} without a Redis PVC.(AAP-39600)
+
+
+=== {ControllerNameStart}
+
+* This release adds `client_id` and `client_secret` fields to the Insights credential to support service accounts via console.redhat.com.(AAP-36565)
+
+* You are now able to specify the input for the `client_id` and `client_secret` for the Insights credential via the `awx.awx.credential_type` module.(AAP-37441)
+
+* Updated `awxkit` by adding service account support for the Insights credential type, specifically adding the fields `client_id` and `client_secret` to `credential_input_fields`.(AAP-39352)
+
+=== {ExecEnvNameStart}
+
+* The *file* command has been added to *ee-minimal* and *ee-supported* container images.(AAP-40009)
+
+== Bug fixes
+
+=== Migration
+
+* Fixed an issue where, after upgrading {PlatformNameShort} from 2.4 to 2.5, many of the surveys that had multiple choice options displayed a blank space in the drop-down menu.(AAP-35093)
+
+=== {PlatformNameShort}
+
+* Fixed a bug in the collections token module where it was unable to find an application if multiple organizations had the same application name.(AAP-38625)
+
+* Fixed an issue where upgrading {PlatformNameShort} 2.5 caused an occasional internal server error for all users with {EDAName} and {HubNameStart} post upgrade.(AAP-39293)
+
+* Fixed an issue where the administrator was not allowed to configure auto migration of legacy authenticators.(AAP-39949)
+
+* Fixed an issue where there were two launch/relaunch icons
displayed from the jobs list for failed jobs.(AAP-38483)
+
+* Fixed an issue where the *Schedules Add* wizard returned a `RequestError` *Not Found*.(AAP-37909)
+
+* Fixed an issue where the *EC2 Inventory Source* type required credentials, which is not necessary when using IAM instance profiles.(AAP-37346)
+
+* Fixed an issue where attempting to assign the *Automation Decisions - Organization Admin* role to a user in an organization resulted in the error *Not managed locally, use the resource server instead*. Administrators can now be added by using the *Organization -> Administrators* tab.(AAP-37106)
+
+* Fixed an issue where, when updating a workflow node, the Job Tags were lost and Skip Tags were not saved.(AAP-35956)
+
+* Fixed an issue where new users who logged in with legacy authentication were not merged when switching to Gateway authentication.(AAP-40120)
+
+* Fixed an issue where the user was unable to link legacy SSO accounts to Gateway.(AAP-40050)
+
+* Fixed an issue where updating {PlatformNameShort} to 2.5 caused an Internal Server Error for all users with {EDAName} and {HubNameStart} post upgrade. The migration process will now detect and fix users who were created in services via JWT auth and improperly linked to the service instead of the {Gateway}.(AAP-39914)
+
+
+=== {OperatorPlatformNameShort}
+
+* Fixed an issue where `AnsibleWorkflow` custom resources would not parse and utilize `extra_vars` if specified.(AAP-39005)
+
+=== {ControllerNameStart}
+
+* Fixed an issue where, when an Azure credential was created using `awxkit`, the creation failed because the parameter `client_id` was added to the input fields while the API was not expecting it.(AAP-39846)
+
+* Fixed an issue where job schedules were running at incorrect times when the schedule's start time fell within a Daylight Saving Time period.(AAP-39826)
+
+
+=== {HubNameStart}
+
+* Fixed an issue where the use of empty usernames and passwords when creating a remote registry was not allowed.(AAP-26462)
+
+
+=== Container-based {PlatformNameShort}
+
+* Fixed an issue where the containerized installer had no preflight check for the Postgres version of an external database.(AAP-39727)
+
+* Fixed an issue where the containerized installer could not register other peers in the database.(AAP-39470)
+
+* Fixed an issue where there was a missing installation user UID check.(AAP-39393)
+
+* Fixed an issue where Postgresql connection errors would be hidden during its configuration.(AAP-39389)
+
+* Fixed a preflight check regression that occurred when the provided TLS private key was not an RSA type.(AAP-39816)
+
+
+=== {EDAName}
+
+* Fixed an issue where the btn:[Generate extra vars] button did not handle file/env injected credentials.(AAP-36003)
+
+=== Known issues
+
+* In the {Gateway}, the tooltip for *Projects -> Create Project - Project Base Path* is undefined.(AAP-27631)
+
+* Deploying the {Gateway} on FIPS-enabled RHEL 9 is currently not supported.(AAP-39146)
diff --git a/downstream/titles/release-notes/async/aap-25-20250225.adoc b/downstream/titles/release-notes/async/aap-25-20250225.adoc
new file mode 100644
index 0000000000..908ec1b4e6
--- /dev/null
+++ b/downstream/titles/release-notes/async/aap-25-20250225.adoc
@@ -0,0 +1,105 @@
+[[aap-25-20250225]]
+
+= {PlatformNameShort} patch release February 25, 2025
+
+This release includes the following components and versions:
+
+[cols="1a,3a", options="header"]
+|===
+| Release Date | Component versions
+
+| February 25, 2025 |
+* {ControllerNameStart} 4.6.8
+* {HubNameStart} 4.10.1
+* {EDAName} 1.1.4
+* Container-based installer {PlatformNameShort} (bundle) 2.5-10.1
+* Container-based installer {PlatformNameShort} (online) 2.5-10
+* Receptor 1.5.1
+* RPM-based installer {PlatformNameShort} (bundle) 2.5-8.2
+* RPM-based installer {PlatformNameShort} (online) 2.5-8
+
+|===
+
+CSV versions in this release:
+
+* Namespace-scoped Bundle: `aap-operator.v2.5.0-0.1740093573`
+
+* Cluster-scoped Bundle: `aap-operator.v2.5.0-0.1740094176`
+
+
+The following enhancements and bug fixes have been implemented in this release of {PlatformNameShort}.
+
+
+== Enhancements
+
+=== {GatewayStart}
+
+* Previously, `gateway_proxy_url` was used for the proxy health check; it is no longer used, in favor of the `ENVOY_HOSTNAME` setting.(AAP-39907)
+
+
+=== {EDAName}
+
+* In the credential type schema, the `format` field can be set to `binary_base64` to specify that a file should be loaded as a binary file.(AAP-36581)
+
+A sample credential type schema inputs configuration:
+
+[source,yaml]
+----
+fields:
+  - id: keytab
+    type: string
+    label: Kerberos Keytab file
+    format: binary_base64
+    secret: true
+    help_text: Please select a Kerberos Keytab file
+    multiline: true
+----
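+
+A minimal sketch of producing such a value from a local file (assuming GNU coreutils `base64`; the keytab path is a placeholder):
+
+[source,bash]
+----
+# Encode a Kerberos keytab so it can be supplied to a binary_base64 field.
+base64 -w 0 /path/to/user.keytab
+----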
+
+
+== Bug fixes
+
+=== {PlatformNameShort}
+
+* Fixed an issue where the subscription entitlement expiration notification was visible, even when the subscription was active.(AAP-39982)
+
+* Fixed an issue where, upon UI reload/refresh, logs of a job running before the refresh would not appear until new logs were generated from the playbook.(AAP-38924)
+
+* Fixed an issue where the customer was unable to scale down replicas to put {PlatformNameShort} into idle mode.(AAP-39492)
+
+* Fixed an issue where, after launching a *Workflow Job Template*, the launched job for a job template node in the workflow did not contain the `job_tags` and `skip_tags` that were specified in the *launch prompt* step.(AAP-40395)
+
+* Fixed an issue where the user was not able to create a members role in {PlatformNameShort} 2.5.(AAP-37626)
+
+* Fixed an issue where a custom image showed Base64 encoded data.(AAP-26984)
+
+* Fixed an issue where a custom logo showed Base64 encoded data.(AAP-26909)
+
+* Fixed an issue that restricted users from executing jobs for which they had the correct permissions.(AAP-40398)
+
+* Fixed an issue where the workflow job template node extra vars were not saved.(AAP-40396)
+
+* Fixed an issue where the {TitleBuilder} guide had the incorrect ansible-core version.(AAP-40390)
+
+* Fixed an issue where you were not able to create a members role in {PlatformNameShort} 2.5.(AAP-40698)
+
+* Fixed an issue where the initial login to any of the services from {Gateway} could result in the user being given access to the wrong account.(AAP-40617)
+
+* Fixed an issue where service-owned resources were not kept in sync with the {Gateway}, allowing duplicate name values on user login.(AAP-40616)
+
+* Fixed an issue where users, organizations, and teams became permanently out of sync if any user, organization, or team was deleted from the {Gateway}.(AAP-40615)
+
+* Fixed an issue where {HubName} would fail to run the sync task if any users were deleted from the system.(AAP-40613)
+
+
+=== {GatewayStart}
+
+* Fixed an issue where ping and status checks with resolvable, but nonresponding, URLs could cause all {Gateway} `uwsgi` workers to hang until all were exhausted. The new settings are `PING_PAGE_CHECK_TIMEOUT` and `PING_PAGE_CHECK_IGNORE_CERT`.(AAP-39907)
+
+
+=== {EDAName}
+
+* Fixed an issue where credentials could be copied in {PlatformNameShort} but could not be copied in {EDAName}.(AAP-35875)
+
+
+=== Known issues
+
+* In the {Gateway}, the tooltip for *Projects -> Create Project - Project Base Path* is undefined.(AAP-27631)
+
+* Deploying the {Gateway} on FIPS-enabled RHEL 9 is currently not supported.(AAP-39146)
diff --git a/downstream/titles/release-notes/async/aap-25-3-28-oct.adoc b/downstream/titles/release-notes/async/aap-25-3-28-oct.adoc
new file mode 100644
index 0000000000..09209c993c
--- /dev/null
+++ b/downstream/titles/release-notes/async/aap-25-3-28-oct.adoc
@@ -0,0 +1,78 @@
+[[aap-25-3-28-oct]]
+
+= {PlatformNameShort} patch release October 28, 2024
+
+The following enhancements and bug fixes have been implemented in this release of {PlatformNameShort}.
+
+== Enhancements
+
+=== {PlatformNameShort}
+
+* With this update, upgrades from {PlatformNameShort} 2.4 to 2.5 are supported for RPM and Operator-based deployments. For more information on how to upgrade, see link:{URLUpgrade}[{TitleUpgrade}]. (ANSTRAT-809)
+** Upgrades from 2.4 Containerized {PlatformNameShort} Tech Preview to 2.5 Containerized {PlatformNameShort} are unsupported at this time.
+** Upgrades for {EDAName} are unsupported from {PlatformNameShort} 2.4 to {PlatformNameShort} 2.5.
+
+=== {OperatorPlatformNameShort}
+
+* An informative redirect page is now shown when you go to the {HubName} URL root. (AAP-30915)
+
+=== Container-based {PlatformNameShort}
+
+* The TLS Certificate Authority private key can now use a passphrase. (AAP-33594)
+
+* {HubNameStart} is populated with container images (decision and execution environments) and Ansible collections. (AAP-33759)
+
+* The {ControllerName}, {EDAName}, and {HubName} legacy UIs now display a redirect page to the Platform UI rather than a blank page. (AAP-33794)
+
+=== RPM-based {PlatformNameShort}
+
+* Added platform Redis to RPM-based {PlatformNameShort}. This allows a six-node cluster for a Redis high availability (HA) deployment. Removed the variable `aap_caching_mtls` and replaced it with `redis_disable_tls` and `redis_disable_mtls`, which are boolean flags that disable Redis server TLS and Redis client certificate authentication. (AAP-33773)
+
+* An informative redirect page is now shown when going to the {ControllerName}, {EDAName}, or {HubName} URL. (AAP-33827)
+
+== Bug fixes
+
+=== {PlatformNameShort}
+
+* Removed the *Legacy external password* option from the *Authentication Type* list. (AAP-31506)
+
+* {Galaxy}'s `sessionauth` class is now always the first in the list of authentication classes so that the platform UI can successfully authenticate. (AAP-32146)
+
+* link:https://access.redhat.com/security/cve/CVE-2024-10033[CVE-2024-10033] - `automation-gateway`: Fixed a Cross-site Scripting (XSS) vulnerability on the `automation-gateway` component that allowed a malicious user to perform actions that impact users.
+
+* link:https://access.redhat.com/security/cve/CVE-2024-22189[CVE-2024-22189] - `receptor`: Resolved an issue in `quic-go` that would allow an attacker to trigger a denial of service by sending a large number of `NEW_CONNECTION_ID` frames that retire old connection IDs.
+
+=== {ControllerNameStart}
+
+* link:https://access.redhat.com/security/cve/CVE-2024-41989[CVE-2024-41989] - `automation-controller`: Before this update, in Django, if `floatformat` received a string representation of a number in scientific notation with a large exponent, it could lead to significant memory consumption. With this update, decimals with more than 200 digits are now returned as is.
+
+* link:https://access.redhat.com/security/cve/CVE-2024-45230[CVE-2024-45230] - `automation-controller`: Resolved an issue in Python's Django `urlize()` and `urlizetrunc()` functions where excessive input with a specific sequence of characters would lead to denial of service.
+
+=== {HubNameStart}
+
+* Refactored the `dynaconf` hooks to preserve the necessary authentication classes for {PlatformNameShort} {PlatformVers} deployments. (AAP-31680)
+
+* During role migrations, model permissions are now re-added to roles to preserve ownership. (AAP-31417)
+
+=== {OperatorPlatformNameShort}
+
+* The port is now correctly set when configuring the {Gateway} cache `redis_host` setting when using an external Redis cache. (AAP-33279)
+
+* Added checksums to the {HubName} deployments so that pods are cycled to pick up changes to the PostgreSQL configuration and galaxy server settings Kubernetes secrets. (AAP-33518)
+
+=== Container-based {PlatformNameShort}
+
+* Fixed the uninstall playbook execution when the environment was already uninstalled. (AAP-32981)
+
+// Commenting this out for now as the advisories are not yet published to the Errata tab on the downloads page: https://access.redhat.com/downloads/content/480/ver=2.5/rhel---9/2.5/x86_64/product-errata

// == Advisories
// The following errata advisories are included in this release:

// * link:https://access.redhat.com/errata/[]

// * link:https://access.redhat.com/errata/[]

// * link:https://access.redhat.com/errata/[]

// * link:https://access.redhat.com/errata/[]
diff --git a/downstream/titles/release-notes/async/aap-25-4-18-nov.adoc b/downstream/titles/release-notes/async/aap-25-4-18-nov.adoc
new file mode 100644
index 0000000000..cb06c66de4
--- /dev/null
+++ b/downstream/titles/release-notes/async/aap-25-4-18-nov.adoc
@@ -0,0 +1,72 @@
+[[aap-25-4-18-nov]]
+
+= {PlatformNameShort} patch release November 18, 2024
+
+The following enhancements and bug fixes have been implemented in this release of {PlatformNameShort}.
+
+== Enhancements
+
+* With this release, a redirect page has been implemented that is displayed when you navigate to the root `/` of each component's stand-alone URL. The API endpoint remains functional. This affects {EDAName}, {ControllerName}, {OperatorPlatformNameShort}, and {OCPShort}.
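+
+A minimal sketch of the behavior (illustrative only; the hostname is a placeholder, and `/api/v2/ping/` is used as an example {ControllerName} API endpoint):
+
+[source,bash]
+----
+# The stand-alone root now serves a redirect page to the platform UI.
+curl -I https://controller.example.com/
+
+# API endpoints remain functional.
+curl -s https://controller.example.com/api/v2/ping/
+----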
+
+
+== Bug fixes
+
+=== General
+
+With this update, the following CVEs have been addressed:
+
+* link:https://access.redhat.com/security/cve/cve-2024-9902[CVE-2024-9902] ansible-core: Ansible-core user may read/write unauthorized content.
+
+* link:https://access.redhat.com/security/cve/cve-2024-8775[CVE-2024-8775] ansible-core: Exposure of sensitive information in Ansible vault files due to improper logging.
+
+
+=== {PlatformNameShort}
+
+* Fixed an issue where the user was unable to filter hosts on inventory groups, which returned a *Failed to load options* error in the {PlatformNameShort} UI.(AAP-34752)
+
+=== {ExecEnvNameStart}
+
+* Updated *pywinrm* to 0.4.3 in *ee-minimal* and *ee-supported* container images to fix Python 3.11 compatibility.(AAP-34077)
+
+=== {OperatorPlatformNameShort}
+
+* Fixed a syntax error, caused by incorrect indentation, that occurred when `bundle_cacert_secret` was defined.(AAP-35358)
+
+* Fixed an issue where the default operator catalog for {PlatformNameShort} aligned to cluster-scoped versus namespace-scoped.(AAP-35313)
+
+* Added the ability to set tolerations and `node_selector` for the Redis *statefulset* and the gateway deployment.(AAP-33192)
+
+* Ensured the platform URL status is set when *Ingress* is used, to resolve an issue with {Azure} on cloud-managed deployments. This issue was due to the {PlatformNameShort} operator failing to finish because it looked for {OCPShort} routes that are not available on Azure Kubernetes Service.(AAP-34036)
+
+* Fixed an issue where the {PlatformNameShort} Operator description did not render code blocks correctly.(AAP-34589)
+
+* It is necessary to specify the `CONTROLLER_SSO_URL` and `AUTOMATION_HUB_SSO_URL` settings in Gateway to fix the OIDC auth redirect flow.(AAP-34080)
+
+* It is necessary to set the `SERVICE_BACKED_SSO_AUTH_CODE_REDIRECT_URL` setting to fix the OIDC auth redirect flow.(AAP-34079)
+
+=== Container-based {PlatformNameShort}
+
+* Fixed an issue where, when the port value was not defined in the `gateway_main_url` variable, the containerized installer failed with an incorrect {ExecEnvShort} image reference error.(AAP-34716)
+
+* Fixed an issue where the containerized installer used a port number when specifying the `image_url` for a decision environment. The user should not add a port to image URLs when using the default value.(AAP-34070)
+
+=== RPM-based {PlatformNameShort}
+
+* Fixed an issue where the *gpg* agent socket was not set up properly when multiple hub nodes were configured, which resulted in no *gpg* socket file being created in `/var/run/pulp`.(AAP-34067)
+
+=== {ToolsName}
+
+* Fixed an issue where data files were missing from the molecule RPM package.(AAP-35758)
+
+// Commenting this out for now as the advisories are not yet published to the Errata tab on the downloads page: https://access.redhat.com/downloads/content/480/ver=2.5/rhel---9/2.5/x86_64/product-errata

// == Advisories
// The following errata advisories are included in this release:

// * link:https://access.redhat.com/errata/[]

// * link:https://access.redhat.com/errata/[]

// * link:https://access.redhat.com/errata/[]

// * link:https://access.redhat.com/errata/[]
diff --git a/downstream/titles/release-notes/async/aap-25-5-3-dec.adoc b/downstream/titles/release-notes/async/aap-25-5-3-dec.adoc
new file mode 100644
index 0000000000..83220d2352
--- /dev/null
+++ b/downstream/titles/release-notes/async/aap-25-5-3-dec.adoc
@@ -0,0 +1,116 @@
+[[aap-25-5-3-dec]]
+
+= {PlatformNameShort} patch release December 3, 2024
+
+The following enhancements and bug fixes have been implemented in this release of {PlatformNameShort}.
+
+== Enhancements
+
+=== {PlatformNameShort}
+
+* {LightspeedShortName} has been updated to 2.5.241127.(AAP-35307)
+
+* The `redhat.insights` Ansible collection has been updated to 1.3.0.(AAP-35161)
+
+* The `ansible.eda` collection has been updated to 2.2.0 in {ExecEnvShort} and decision environment images.(AAP-3398)
+
+=== {OperatorPlatformNameShort}
+
+* With this update, you can set PostgreSQL SSL/TLS mode to `verify-full` or `verify-ca` with the proper `sslrootcert` configuration in the {HubName} Operator.(AAP-35368)
+
+=== Container-based {PlatformNameShort}
+
+* With this update, `ID` and `Image` fields from a container image are used instead of `Digest` and `ImageDigest` to trigger a container update.(AAP-36575)
+
+* With this update, you can now update the registry URL value in {EDAName} credentials.(AAP-35085)
+
+* With this update, the `kernel.keys.maxkeys` and `kernel.keys.maxbytes` settings are increased on systems with large memory configuration (see the sketch after this list).(AAP-34019)
+
+* Added `ansible_connection=local` to the `inventory-growth` file and clarified its usage.(AAP-34016)
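+
+A quick way to inspect the current values on a host (a sketch; the `sysctl` utility from procps-ng is assumed):
+
+[source,bash]
+----
+# Show the kernel keyring limits that the installer tunes.
+sysctl kernel.keys.maxkeys kernel.keys.maxbytes
+----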
+
+=== Documentation updates
+
+* With this update, the Container growth topology and Container enterprise topology have been updated to include s390x (IBM Z) architecture test support.(AAP-35969)
+
+=== RPM-based {PlatformNameShort}
+
+* With this update, you can now update the registry URL value in {EDAName} credentials.(AAP-35162)
+
+== Bug fixes
+
+=== General
+
+With this update, the following CVEs have been addressed:
+
+* link:https://access.redhat.com/security/cve/CVE-2024-52304[CVE-2024-52304] `automation-controller`: `aiohttp` vulnerable to request smuggling due to incorrect parsing of chunk extensions.
+
+=== {OperatorPlatformNameShort}
+
+* With this update, missing {OperatorPlatformNameShort} custom resource definitions (CRDs) are added to the `aap-must-gather` container image.(AAP-35226)
+
+* Disabled {Gateway} authentication in the proxy configuration to prevent HTTP 502 errors when the control plane is down.(AAP-36527)
+
+* The Red Hat favicon is now correctly displayed on {ControllerName} and {EDAName} API tabs.(AAP-30810)
+
+* With this update, the {ControllerName} admin password is now reused during upgrade from {PlatformNameShort} 2.4 to 2.5.(AAP-35159)
+
+* Fixed an undefined variable (`_controller_enabled`) when reconciling an `AnsibleAutomationPlatformRestore`. Fixed an {HubName} Operator `pg_restore` error on restores due to a wrong database secret being set.(AAP-35815)
+
+=== {ControllerNameStart}
+
+* Updated the minor version of uWSGI to obtain updated log verbiage.(AAP-33169)
+
+* Fixed job schedules running at the wrong time when the `rrule` interval was set to `HOURLY` or `MINUTELY`.(AAP-36572)
+
+* Fixed an issue where sensitive data was displayed in the job output.(AAP-35584)
+
+* Fixed an issue where unrelated jobs could be marked as a dependency of other jobs.(AAP-35309)
+
+* Included pod anti-affinity configuration on the default container group pod specification to optimally spread the workload.(AAP-35055)
+
+=== Container-based {PlatformNameShort}
+
+* With this update, you cannot change the `postgresql_admin_username` value when using a managed database node.(AAP-36577)
+
+* Added update support for the PCP monitoring role.
+
+* Disabled {Gateway} authentication in the proxy configuration to prevent HTTP 502 errors when the control plane is down.
+
+* With this update, you can use dedicated nodes for the Redis group.
+
+* Fixed an issue where disabling TLS on {Gateway} would cause installation to fail.
+
+* Fixed an issue where disabling TLS on the {Gateway} proxy would cause installation to fail.
+
+* Fixed an issue where {Gateway} uninstall would leave container systemd unit files on disk.
+
+* Fixed an issue where the {HubName} container signing service creation failed when `hub_collection_signing=false` but `hub_container_signing=true`.
+
+* Fixed an issue with the `HOME` environment variable for receptor containers, which would cause a “Permission denied” error on the containerized execution node.
+
+* Fixed an issue where the GPG agent socket was not set up properly when many hub nodes were configured, which resulted in no GPG socket file being created in `/var/tmp/pulp`.
+
+* With this update, you can now change the {Gateway} port value after the initial deployment.
+
+=== Receptor
+
+* Fixed an issue that caused a Receptor runtime panic error.
+
+=== RPM-based {PlatformNameShort}
+
+* Fixed an issue where the `metrics-utility` command failed to run after updating {ControllerName}.
+
+* Fixed the owner and group permissions on the `/etc/tower/uwsgi.ini` file.
+
+* Fixed an issue where not having `eda_node_type` defined in the inventory file would result in a backup failure.
+
+* Fixed an issue where not having `routable_hostname` defined in the inventory file would result in a restore failure.
+
+* With this update, the `inventory-growth` file is now included in the RPM installer.
+
+* Fixed an issue where the dispatcher service went into `FATAL` status and failed to process new jobs after a database outage of a few minutes.
+
+* Disabled {Gateway} authentication in the proxy configuration to allow access to the UI when the control plane is down.
+
+* With this update, the Receptor data directory can now be configured using the `receptor_datadir` variable.
+
diff --git a/downstream/titles/release-notes/async/async-updates.adoc b/downstream/titles/release-notes/async/async-updates.adoc
new file mode 100644
index 0000000000..bd98ccc2d5
--- /dev/null
+++ b/downstream/titles/release-notes/async/async-updates.adoc
@@ -0,0 +1,18 @@
+
+= Patch releases
+
+Security, bug fixes, and enhancements for {PlatformNameShort} {PlatformVers} are released as asynchronous errata. All {PlatformNameShort} errata are available on the link:{PlatformDownloadUrl}[Download {PlatformName}] page.
+
+As a Red{nbsp}Hat Customer Portal user, you can enable errata notifications in the account settings for Red{nbsp}Hat Subscription Management (RHSM). When errata notifications are enabled, you receive notifications through email whenever new errata relevant to your registered systems are released.
+
+[NOTE]
+====
+Red{nbsp}Hat Customer Portal user accounts must have systems registered and consuming {PlatformNameShort} entitlements for {PlatformNameShort} errata notification emails to be generated.
+====
+
+The patch releases section of the release notes is updated over time with notes on enhancements and bug fixes for patch releases of {PlatformNameShort} {PlatformVers}.
+
+[role="_additional-resources"]
+.Additional resources
+* For more information about asynchronous errata support in {PlatformNameShort}, see link:https://access.redhat.com/support/policy/updates/ansible-automation-platform[{PlatformName} Life Cycle].
+* For information about Common Vulnerabilities and Exposures (CVEs), see link:https://www.redhat.com/en/topics/security/what-is-cve[What is a CVE?] and link:https://access.redhat.com/security/security-updates/cve[Red Hat CVE Database].
diff --git a/downstream/titles/release-notes/topics/installer-version-table.adoc b/downstream/titles/release-notes/async/installer-version-table.adoc similarity index 59% rename from downstream/titles/release-notes/topics/installer-version-table.adoc rename to downstream/titles/release-notes/async/installer-version-table.adoc index bcd4040b3b..188a0f38ca 100644 --- a/downstream/titles/release-notes/topics/installer-version-table.adoc +++ b/downstream/titles/release-notes/async/installer-version-table.adoc @@ -6,12 +6,12 @@ |=== | Installation bundle | Component versions -| xref:installer-24-7[2.4-7] + -June 12, 2024 | -* `ansible-automation-platform-setup` 2.4-7 -* `ansible-core` 2.15.11 -* {ControllerNameStart} 4.5.7 -* {HubNameStart} 4.9.2 -* {EDAName} 1.0.7 +| Advisory link + +Month Date, 2024 | +* `ansible-automation-platform-setup` +* `ansible-core` +* {ControllerNameStart} +* {HubNameStart} +* {EDAName} -|=== \ No newline at end of file +|=== diff --git a/downstream/titles/release-notes/async/rpm-version-table.adoc b/downstream/titles/release-notes/async/rpm-version-table.adoc new file mode 100644 index 0000000000..8702f0840c --- /dev/null +++ b/downstream/titles/release-notes/async/rpm-version-table.adoc @@ -0,0 +1,18 @@ +// This table contains the component/package versions per RPM release + +.Component versions per errata advisory +//cols="a,a" formats the columns as AsciiDoc allowing for AsciiDoc syntax +[cols="2a,3a", options="header"] +|=== +| Errata advisory | Component versions + +| Advisory link + +Month Date, 2024 | +* `ansible-automation-platform-installer` +* `ansible-automation-platform-setup` +* `ansible-core` +* {ControllerNameStart} +* {HubNameStart} +* {EDAName} + +|=== diff --git a/downstream/titles/release-notes/docinfo.xml b/downstream/titles/release-notes/docinfo.xml index 26df600387..3fd858d7ca 100644 --- a/downstream/titles/release-notes/docinfo.xml +++ b/downstream/titles/release-notes/docinfo.xml @@ -1,7 +1,8 @@ -Red Hat Ansible Automation Platform release notes +Release notes Red Hat Ansible Automation Platform 2.5 -New features, enhancements, and bug fix information +New features, enhancements, and bug fix information + The release notes for Red Hat Ansible Automation Platform summarize all new features and enhancements, notable technical changes, major corrections from the previous version, and any known bugs upon general availability. diff --git a/downstream/titles/release-notes/master.adoc b/downstream/titles/release-notes/master.adoc index deb50aa0bf..e2c3e90669 100644 --- a/downstream/titles/release-notes/master.adoc +++ b/downstream/titles/release-notes/master.adoc @@ -1,69 +1,56 @@ // Templates for release notes are contained in the ..downstream/snippets folder. // For each release, make a copy of assembly-rn-template.adoc, rename and save as instructed in the template and add an include statement to this file. -include::attributes/attributes.adoc[] - -= Red Hat Ansible Automation Platform release notes - - -include::{Boilerplate}[] - -include::topics/platform-intro.adoc[leveloffset=+1] - -include::topics/aap-24.adoc[leveloffset=+1] - -include::topics/controller-440.adoc[leveloffset=+1] - -include::topics/eda-24.adoc[leveloffset=+1] - -include::topics/hub-464.adoc[leveloffset=+1] - -include::topics/operator-240.adoc[leveloffset=+1] - -include::topics/docs-24.adoc[leveloffset=+1] +//If there are any technology previews, add the file. 
+// Asynchronous release notes - commented out until 2.5 has asynchronous release note updates +// include::async/async-updates.adoc[leveloffset=+1] -// == Asynchronous updates -include::topics/async-updates.adoc[leveloffset=+1] - -=== RPM releases - -include::topics/rpm-version-table.adoc[leveloffset=+3] - -include::topics/rpm-24-7.adoc[leveloffset=+3] - -include::topics/rpm-24-6.adoc[leveloffset=+3] - -include::topics/rpm-24-5.adoc[leveloffset=+3] - -include::topics/rpm-24-4.adoc[leveloffset=+3] -include::topics/rpm-24-3.adoc[leveloffset=+3] +:experimental: -include::topics/rpm-24-2.adoc[leveloffset=+3] - -=== Installer releases - -include::topics/installer-version-table.adoc[leveloffset=+3] +include::attributes/attributes.adoc[] -include::topics/installer-24-7.adoc[leveloffset=+3] += Release notes -include::topics/installer-24-62.adoc[leveloffset=+3] -include::topics/installer-24-61.adoc[leveloffset=+3] +include::{Boilerplate}[] -include::topics/installer-24-6.adoc[leveloffset=+3] +include::topics/platform-intro.adoc[leveloffset=+1] -include::topics/installer-24-24.adoc[leveloffset=+3] +include::topics/aap-25.adoc[leveloffset=+1] -include::topics/installer-24-23.adoc[leveloffset=+3] +include::topics/aap-25-deprecated-features.adoc[leveloffset=+1] -include::topics/installer-24-22.adoc[leveloffset=+3] +include::topics/aap-25-removed-features.adoc[leveloffset=+1] -include::topics/installer-24-21.adoc[leveloffset=+3] +include::topics/aap-25-changed-features.adoc[leveloffset=+1] -include::topics/installer-24-14.adoc[leveloffset=+3] +include::topics/aap-25-known-issues.adoc[leveloffset=+1] -include::topics/installer-24-13.adoc[leveloffset=+3] +include::topics/aap-25-fixed-issues.adoc[leveloffset=+1] -include::topics/installer-24-12.adoc[leveloffset=+3] +include::topics/docs-25.adoc[leveloffset=+1] -include::topics/installer-24-11.adoc[leveloffset=+3] +// == Asynchronous updates +include::async/async-updates.adoc[leveloffset=+1] +// Async release 2.3-02-25-2025 +include::async/aap-25-20250225.adoc[leveloffset=+2] +// Async release 2.5-02-13-2025 +include::async/aap-25-20250213.adoc[leveloffset=+2] +// Async release 2.5-01-29-January +include::async/aap-25-20250129.adoc[leveloffset=+2] +// Async release 2.5-01-22-January +include::async/aap-25-20250122.adoc[leveloffset=+2] +// Asyn release 2.5-01-15-January +include::async/aap-25-20250115.adoc[leveloffset=+2] +// Asyn release 2.5-12-18-December +include::async/aap-25-12-18-dec.adoc[leveloffset=+2] +// Async release 2.5-5 3rd Dec +include::async/aap-25-5-3-dec.adoc[leveloffset=+2] +// Async release 2.5-4 18th Nov (was released early) +include::async/aap-25-4-18-nov.adoc[leveloffset=+2] +// Async release 2.5-3 (AKA event 2) 28th Oct +include::async/aap-25-3-28-oct.adoc[leveloffset=+2] +// Async release 2.5-2 14th Oct +include::async/aap-25-2-14-oct.adoc[leveloffset=+2] +//Async release 2.5-1 7th Oct +include::async/aap-25-1-7-oct.adoc[leveloffset=+2] diff --git a/downstream/titles/release-notes/topics/aap-24.adoc b/downstream/titles/release-notes/topics/aap-24.adoc deleted file mode 100644 index c18585a9bb..0000000000 --- a/downstream/titles/release-notes/topics/aap-24.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// For each release of AAP, make a copy of this file and rename it to aap-rn-xx.adoc where xx is the release number; for example, 24 for the 2.4 release. -// Save the renamed copy of this file to the release-notes/topics directory topic files for the release notes reside. -//Only include release note types that have updates for a given release. 
For example, if there are no Technology previews for the release, remove that section from this file. - - -= Overview of the {PlatformNameShort} 2.4 release - -== New features and enhancements - -{PlatformNameShort} 2.4 includes the following enhancements: - -* Previously, the {ExecEnvShort} container images were based on RHEL 8 only. With {PlatformNameShort} 2.4 onwards, the {ExecEnvShort} container images are now also available on RHEL 9. -The {ExecEnvShort} includes the following container images: -** ansible-python-base -** ansible-python-toolkit -** ansible-builder -** ee-minimal -** ee-supported - -* The ansible-builder project recently released {Builder} version 3, a much-improved and simplified approach to creating execution environments. -You can use the following configuration YAML keys with {Builder} version 3: -** additional_build_files -** additional_build_steps -** build_arg_defaults -** dependencies -** images -** options -** version - -* {PlatformNameShort} 2.4 and later versions can now run on ARM platforms, including both the control plane and the execution environments. - -* Added an option to configure the SSO logout URL for {HubName} if you need to change it from the default value. - -* Updated the ansible-lint RPM package to version 6.14.3. - -* Updated Django for potential denial-of-service vulnerability in file uploads (link:https://access.redhat.com/security/cve/CVE-2023-24580[CVE-2023-24580]). - -* Updated sqlparse for ReDOS vulnerability (link:https://access.redhat.com/security/cve/CVE-2023-30608[CVE-2023-30608]). - -* Updated Django for potential denial-of-service in Accept-Language headers (link:https://access.redhat.com/security/cve/CVE-2023-23969[CVE-2023-23969]). - -* {PlatformNameShort} 2.4 adds the ability to install {ControllerName}, {HubName}, and {EDAName} on IBM Power (ppc64le), IBM Z (s390x), and IBM® LinuxONE (s390x) architectures. - -.Additional resources - -* For more information about using {Builder} version 3, see link:https://ansible.readthedocs.io/projects/builder/en/stable/[{Builder} Documentation] and link:https://docs.ansible.com/automation-controller/latest/html/userguide/ee_reference.html[Execution Environment Setup Reference]. - -== Technology Preview - -include::../snippets/technology-preview.adoc[] - -The following are Technology Preview features: - -* Starting with {PlatformNameShort} 2.4, the Platform Resource Operator can be used to create the following resources in {ControllerName} by applying YAML to your OpenShift cluster: -** Inventories -** Projects -** Instance Groups -** Credentials -** Schedules -** Workflow Job Templates -** Launch Workflows - -You can now configure the Controller Access Token for each resource with the `connection_secret` parameter, rather than the `tower_auth_secret` parameter. This change is compatible with earlier versions, but the `tower_auth_secret` parameter is now deprecated and will be removed in a future release. - -[role="_additional-resources"] -.Additional resources - -* For the most recent list of Technology Preview features, see link:https://access.redhat.com/articles/ansible-automation-platform-preview-features[Ansible Automation Platform - Preview Features]. - -* For information about execution node enhancements on OpenShift deployments, see link:https://docs.ansible.com/automation-controller/latest/html/administration/instances.html[Managing Capacity With Instances]. 
-
-== Deprecated and removed features
-
-include::../snippets/deprecated-features.adoc[]
-
-The following functionality was deprecated and removed in {PlatformNameShort} 2.4:
-
-* On-premise component {CatalogName} is now removed from {PlatformNameShort} 2.4 onwards.
-
-* With the {PlatformNameShort} 2.4 release, the {ExecEnvShort} container image for Ansible 2.9 (*ee-29-rhel-8*) is no longer loaded into the {ControllerName} configuration by default.
-
-* Although you can still synchronize content, the use of synclists is deprecated and will be removed in a later release. Instead, {PrivateHubName} administrators can upload manually-created requirements files from the `rh-certified` remote.
-
-* You can now configure the Controller Access Token for each resource with the `connection_secret` parameter, rather than the `tower_auth_secret` parameter. This change is compatible with earlier versions, but the `tower_auth_secret` parameter is now deprecated and will be removed in a future release.
-
-* Smart inventories have been deprecated in favor of constructed inventories and will be removed in a future release.
-
-== Bug fixes
-
-{PlatformNameShort} 2.4 includes the following bug fixes:
-
-* Updated the installation program to ensure that collection auto signing cannot be enabled without enabling the collection signing service.
-
-* Fixed an issue with restoring backups when the installed {ControllerName} version is different from the backup version.
-
-* Fixed an issue with not adding user defined galaxy-importer settings to `galaxy-importer.cfg` file.
-
-* Added missing `X-Forwarded-For` header information to nginx logs.
-
-* Removed unnecessary receptor peer name validation when IP address is used as the name.
-
-* Updated the `outdated base_packages.txt` file that is included in the bundle installer.
-
-* Fixed an issue where upgrading the {PlatformNameShort} did not update the nginx package by default.
-
-* Fixed an issue where an *awx* user was created without creating an *awx* group on execution nodes.
-
-* Fixed the assignment of package version variable to work with flat file inventories.
-
-* Added a FQDN check for the {HubName} hostname required to run the Skopeo commands.
-
-* Fixed the front end URL for Red Hat Single Sign On (SSO) so it is now properly configured after you specify the `sso_redirect_host` variable.
-
-* Fixed the variable precedence for all component `nginx_tls_files_remote` variables.
-
-* Fixed the *setup.sh* script to escalate privileges if necessary for installing {PlatformNameShort}.
-
-* Fixed an issue when restoring a backup to an {HubName} with a different hostname.
diff --git a/downstream/titles/release-notes/topics/aap-25-2-patch-release-7-oct-2024.adoc b/downstream/titles/release-notes/topics/aap-25-2-patch-release-7-oct-2024.adoc
new file mode 100644
index 0000000000..b64294abc0
--- /dev/null
+++ b/downstream/titles/release-notes/topics/aap-25-2-patch-release-7-oct-2024.adoc
@@ -0,0 +1,59 @@
+//This is the working version of the patch release notes document.
+
+[[aap-25-2-patch-release-7-oct-2024]]
+
+
+= {PlatformName} 2.5-2 - October 7, 2024
+
+This release includes several enhancements and fixes that have been implemented in Red Hat {PlatformNameShort} 2.5-2.
+
+== Enhancements
+
+* {EDAName} workers and the scheduler now add timeout and retry resilience when communicating with a Redis cluster. (AAP-32139)
+* Removed the *MTLS* credential type that was incorrectly added. (AAP-31848)
+
+== Fixed issues
+
+=== {PlatformNameShort}
+
+* Fixed a conditional that skipped necessary tasks in the restore role, which prevented restores from finishing reconciliation. (AAP-30437)
+
+* Systemd services in the containerized installer now have the restart policy set to *always* by default. (AAP-31824)
+
+* *FLUSHDB* is now modified to account for shared usage of a Redis database. It now respects access limitations by removing only those keys that the client has permissions for. (AAP-32138)
+
+* Added a fix to ensure default *extra_vars* values are rendered in the *Prompt on launch* wizard. (AAP-30585)
+
+* Filtered out the unused *ANSIBLE_BASE_* settings from the environment variables in job execution. (AAP-32208)
+
+
+=== {EDAName}
+
+* Configured the setting *EVENT_STREAM_MTLS_BASE_URL* to the correct default to ensure MTLS is disallowed in the RPM installer. (AAP-32027)
+
+* Configured the setting *EVENT_STREAM_MTLS_BASE_URL* to the correct default to ensure MTLS is disallowed in the containerized installer. (AAP-31851)
+
+* Fixed a bug where the Event-Driven Ansible workers and scheduler were unable to reconnect to the Redis cluster if a primary Redis node entered a *failed* state and a new primary node was promoted. See the KCS article link:https://access.redhat.com/articles/7088545[Redis failover causes {EDAName} activation failures], which includes the steps that were necessary before this bug was fixed. (AAP-30722)
+
+== Advisories
+This section lists the errata advisories contained in this release.
+
+.Errata advisories
+//cols="a,a" formats the columns as AsciiDoc allowing for AsciiDoc syntax
+[cols="2a,3a", options="header"]
+|===
+| Patch release version | Errata advisory
+
+| {PlatformNameShort} 2.5-2 - October 7, 2024
+
+|
+
+link:https://access.redhat.com/errata/RHBA-2024:7756[RHBA-2024:7756]
+
+link:https://access.redhat.com/errata/RHBA-2024:7760[RHBA-2024:7760]
+
+link:https://access.redhat.com/errata/RHBA-2024:7766[RHBA-2024:7766]
+
+link:https://access.redhat.com/errata/RHBA-2024:7810[RHBA-2024:7810]
+
+|===
diff --git a/downstream/titles/release-notes/topics/aap-25-changed-features.adoc b/downstream/titles/release-notes/topics/aap-25-changed-features.adoc
new file mode 100644
index 0000000000..c2a9cb8619
--- /dev/null
+++ b/downstream/titles/release-notes/topics/aap-25-changed-features.adoc
@@ -0,0 +1,33 @@
+[[aap-2.5-changed-features]]
+= Changed features
+
+Changed features are not deprecated and will continue to be supported until further notice.
+
+The following table provides information about features that are changed in {PlatformNameShort} 2.5:
+
+[cols="20%,80%"]
+|===
+| Component | Feature
+
+|{HubNameStart}
+|Error codes have changed from 403 to 401. Any API client logic that relies on receiving status code 403 rather than 401 must be updated. Standard UI usage works as expected.
+
+|{EDAName}
+|The `/extra_vars` endpoint has moved to a property within `/activations`.
+
+|{EDAName}
+|The endpoint `/credentials` was replaced with `/eda-credentials` (see the request sketch after this table). This is part of an expanded credentials capability for {EDAName}. For more information, see the chapter link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/event-driven_ansible_controller_user_guide/eda-credentials[Setting up credentials for {EDAcontroller}] in the _{EDAcontroller} user guide_.
+
+|{EDAName}
+|{EDAName} can no longer add, edit, or delete the {Gateway}-managed resources. Creating, editing, or deleting organizations, teams, or users is available through {Gateway} endpoints only. The {Gateway} endpoints also enable you to edit organization or team memberships and configure external authentication.
+
+|API
+|Auditing of users has now changed. Users are now audited through the platform API, not through the controller API. This change applies to the {PlatformNameShort} in both cloud service and on-premise deployments.
+
+|{ControllerNameStart},
+
+{HubName},
+
+{Gateway}, and
+
+{EDAName}
+|User permission audits now follow the sources of truth for the {Gateway}. When an IdP (SSO) is used, the IdP should be the source of truth for user permission audits. When the {PlatformNameShort} {Gateway} is used without SSO, the {Gateway} should be the source of truth for user permissions, not the app-specific UIs or APIs.
+
+|===
\ No newline at end of file
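+If you drive these APIs from automation, the endpoint rename is easy to miss. The following is a minimal, hedged sketch of a request against the renamed endpoint by using `ansible.builtin.uri`; the `aap_gateway` host variable, the credential payload, and the exact path prefix are illustrative assumptions rather than values documented in this release.
+
+[source,yaml]
+----
+- name: Create an EDA credential by using the renamed endpoint
+  ansible.builtin.uri:
+    # Previously POST .../credentials; renamed to .../eda-credentials in 2.5.
+    # aap_gateway, aap_username, and aap_password are placeholder variables.
+    url: "https://{{ aap_gateway }}/api/eda/v1/eda-credentials/"
+    method: POST
+    user: "{{ aap_username }}"
+    password: "{{ aap_password }}"
+    force_basic_auth: true
+    body_format: json
+    body:
+      name: example-credential
+      credential_type_id: 1  # illustrative ID
+      inputs:
+        username: demo
+    status_code: [201]
+----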
diff --git a/downstream/titles/release-notes/topics/aap-25-deprecated-features.adoc b/downstream/titles/release-notes/topics/aap-25-deprecated-features.adoc
new file mode 100644
index 0000000000..01bc6f0d82
--- /dev/null
+++ b/downstream/titles/release-notes/topics/aap-25-deprecated-features.adoc
@@ -0,0 +1,140 @@
+[[aap-2.5-deprecated-features]]
+= Deprecated features
+
+include::../snippets/deprecated-features.adoc[]
+
+The following table provides information about features that were deprecated in {PlatformNameShort} 2.5:
+
+[cols="20%,80%"]
+|===
+| Component | Feature
+
+|{ControllerNameStart},
+
+{HubName}, and
+
+{EDAcontroller}
+|Tokens for the {ControllerName} and the {HubName} are deprecated. If you want to generate tokens, use the {Gateway} to create them.
+
+The {Gateway} is the service that handles authentication and authorization for the {PlatformNameShort}. It provides a single entry into the {PlatformNameShort} and serves the platform user interface, so you can authenticate and access all of the {PlatformNameShort} services from a single location.
+
+|{ControllerNameStart} and
+
+{HubName}
+|All non-local authentications into the {ControllerName} and {HubName} are deprecated. Use the {Gateway} to configure external authentications, such as SAML, LDAP, and RADIUS.
+
+|Ansible-core
+|The *COLLECTIONS_PATHS* INI configuration option is deprecated. Use the singular form *COLLECTIONS_PATH* instead.
+
+|Ansible-core
+|The environment variable *ANSIBLE_COLLECTIONS_PATHS* is deprecated. Use the singular form *ANSIBLE_COLLECTIONS_PATH* instead.
+
+|Ansible-core
+|Old-style Ansible vars plug-ins that use the entry points `get_host_vars` or `get_group_vars` were deprecated in ansible-core 2.16, and will be removed in ansible-core 2.18. Update the Ansible plug-in to inherit from *BaseVarsPlugin* and define a `get_vars` method as the entry point.
+
+|Ansible-core
+|The *STRING_CONVERSION_ACTION* configuration option is deprecated as it is no longer used in the ansible-core code base.
+
+|Ansible-core
+|The *smart* option for setting a connection plug-in is being removed because its main purpose, choosing between the SSH and Paramiko protocols, is now irrelevant. Select an explicit connection plug-in instead.
+
+|Ansible-core
+|The undocumented `vaultid` parameter in the `vault` and `unvault` filters is deprecated and will be removed in ansible-core version 2.20. Use `vault_id` instead.
+
+|Ansible-core
+|The string parameter `keepcache` in the `yum_repository` module is deprecated.
+
+|Ansible-core
+|The `required` parameter in the API `ansible.module_utils.common.process.get_bin_path` is deprecated.
+
+|Ansible-core
+|`module_utils` - Importing the following convenience helpers from `ansible.module_utils.basic` has been deprecated:
+
+`get_exception`, `literal_eval`, `_literal_eval`, `datetime`, `signal`, `types`, `chain`, `repeat`, `PY2`, `PY3`, `b`, `binary_type`, `integer_types`, `iteritems`, `string_types`, `test_type`, `map`, and `shlex_quote`.
+
+Import the helpers from the source definition.
+
+|Ansible-core
+|`ansible-doc` - Role `entrypoint` attributes are deprecated and eventually will no longer be shown in `ansible-doc` from ansible-core.
+
+|{ExecEnvNameStartSing}
+|Execution environment-29 will be deprecated in the next major release after {PlatformNameShort} 2.5.
+
+|Installer
+|The Ansible team is exploring ways to improve the installation of the {PlatformNameShort} on {RHEL}, which may include changes to how components are deployed using RPM directly on the host OS. RPMs will be replaced by packages deployed into containers that are run via Podman; this is similar to how automation currently executes on Podman in containers (execution environments) on the host OS. Changes will be communicated through release notes, but removal will occur in major release versions of the {PlatformNameShort}.
+
+|Automation mesh
+|The Work Python option has been deprecated and will be removed from automation mesh in a future release.
+
+|===
+
+
+== Deprecated API endpoints
+
+The following API endpoints will be removed in a future release, either because their functionality is being removed or because it is superseded by other capabilities. For example, with the platform moving to a centralized authentication system in the {Gateway}, the existing authorization APIs in the {ControllerName} and {HubName} are being deprecated because all authentication operations should occur in the {Gateway}.
+
+[cols="20%,40%,40%"]
+|===
+| Component | Endpoint | Capability
+
+|{ControllerNameStart}
+|`*/api/o*`
+|Token authentication is moving to the {Gateway}.
+
+|{HubNameStart}
+|`*/api/login/keycloak*`
+|Moving to the {Gateway}.
+
+|{HubNameStart}
+|`*/api/v3/auth/token*`
+|Token authentication used for pulling collections will migrate to the {Gateway} tokens.
+
+|{ControllerNameStart}
+|`*/api/v2/organizations*`
+|Moving to the {Gateway}.
+
+|{ControllerNameStart}
+|`*/api/v2/teams*`
+|Moving to the {Gateway}.
+
+|{ControllerNameStart}
+|`*/api/v2/users*`
+|Moving to the {Gateway}.
+
+|{ControllerNameStart}
+|`*/api/v2/roles*`
+|Controller-specific role definitions are moving to `*/api/controller/v2/role_definitions*`.
+
+|{ControllerNameStart}
+a|
+The following roles lists:
+
+* `*/api/v2/teams/{id}/roles/*`
+* `*/api/v2/users/{id}/roles/*`
+|Controller-specific resource permissions are moving to `*/api/controller/v2/role_user_assignments*` and `*/api/controller/v2/role_team_assignments*`.
+
+|{ControllerNameStart}
+a|
+The following object roles lists:
+
+* `*/api/v2/credentials/{id}/object_roles/*`
+* `*/api/v2/instance_groups/{id}/object_roles/*`
+* `*/api/v2/inventories/{id}/object_roles/*`
+* `*/api/v2/job_templates/{id}/object_roles/*`
+* `*/api/v2/organizations/{id}/object_roles/*`
+* `*/api/v2/projects/{id}/object_roles/*`
+* `*/api/v2/teams/{id}/object_roles/*`
+* `*/api/v2/workflow_job_templates/{id}/object_roles/*`
+|Controller-specific resource permissions are moving to `*/api/controller/v2/role_user_assignments*` and `*/api/controller/v2/role_team_assignments*`.
+
+|{ControllerNameStart}
+a|
+The following resource access lists:
+
+* `*/api/v2/credentials/{id}/access_list/*`
+* `*/api/v2/instance_groups/{id}/access_list/*`
+* `*/api/v2/inventories/{id}/access_list/*`
+* `*/api/v2/job_templates/{id}/access_list/*`
+* `*/api/v2/organizations/{id}/access_list/*`
+* `*/api/v2/projects/{id}/access_list/*`
+* `*/api/v2/teams/{id}/access_list/*`
+* `*/api/v2/users/{id}/access_list/*`
+* `*/api/v2/workflow_job_templates/{id}/access_list/*`
+|No replacements yet.
+
+|===
\ No newline at end of file
diff --git a/downstream/titles/release-notes/topics/aap-25-fixed-issues.adoc b/downstream/titles/release-notes/topics/aap-25-fixed-issues.adoc
new file mode 100644
index 0000000000..665e6d8e5b
--- /dev/null
+++ b/downstream/titles/release-notes/topics/aap-25-fixed-issues.adoc
@@ -0,0 +1,82 @@
+[[aap-2.5-fixed-issues]]
+= Fixed issues
+
+This section provides information about fixed issues in {PlatformNameShort} 2.5.
+
+== {PlatformNameShort}
+
+* The installer now ensures that the `semanage` command is available when SELinux is enabled. (AAP-24396)
+
+* The installer can now update certificates without attempting to start the nginx service for previously installed environments. (AAP-19948)
+
+* {EDAName} installation now fails when the pre-existing {ControllerName} is older than version 4.4.0. (AAP-18572)
+
+* {EDAName} can now successfully install on its own with a controller URL when the controller is not in the inventory. (AAP-16483)
+
+* Postgres tasks that create users in FIPS environments now use *scram-sha-256*. (AAP-16456)
+
+* The installer now successfully generates a new `SECRET_KEY` for controller. (AAP-15513)
+
+* Ensure all backup and restore staged files and directories are cleaned up before running a backup or restore. You must also mark the files for deletion after a backup or restore. (AAP-14986)
+
+* Postgres certificates are now temporarily copied when checking the Postgres version for SSL mode verify-full. (AAP-14732)
+
+* The setup script now warns if the provided log path does not have write permissions, and fails if the default path does not have write permissions. (AAP-14135)
+
+* The linger configuration is now correctly set by the root user for the {EDAName} user. (AAP-13744)
+
+* Subject alternative names for component hosts are now only checked for signing certificates when HTTPS is enabled. (AAP-7737)
+
+* The UI for creating and editing an organization now validates the *Max hosts* value. This value must be an integer between 0 and 214748364. (AAP-23270)
+
+* Installations that do not include the {ControllerName} but have an external database will no longer install an unused internal Postgres server. (AAP-29798)
+
+* Added default port values for all `pg_port` variables in the installer. (AAP-18484)
+
+* *XDG_RUNTIME_DIR* is now defined when applying {EDAName} linger settings for Podman. (AAP-18341)
+
+* Fixed an issue where the restore process failed to stop *pulpcore-worker* services on RHEL 9. (AAP-12829)
+
+* Fixed Postgres *sslmode* for verify-full that affected external Postgres and Postgres signed for 127.0.0.1 for internally managed Postgres. (AAP-7107)
+
+* Fixed support for {HubName} content signing. (AAP-9739)
+
+* Fixed conditional code statements to align with changes from ansible-core issue #82295. (AAP-19053)
+
+* Resolved an issue where providing a custom port for the database installation broke the installation of Postgres. (AAP-30636)
+
+== {HubNameStart}
+
+* {HubNameStart} now uses system crypto-policies in nginx. (AAP-17775)
+
+== {EDAName}
+
+* Fixed a bug where the Swagger API docs URL returned a 404 error with a trailing slash. (AAP-27417)
+
+* Fixed a bug where logs contained stack trace errors inappropriately. (AAP-23605)
+
+* Fixed a bug where the API returned error 500 instead of error 400 when a foreign key ID did not exist. (AAP-23105)
+
+* Fixed a bug where the Git hash of a project could be empty. (AAP-21641)
+
+* Fixed a bug where an activation could fail at the start time due to authentication errors with Podman. (AAP-21067)
+
+* Fixed a bug where a project could not get imported if it contained a malformed rulebook. (AAP-20868)
+
+* Added *EDA_CSRF_TRUSTED_ORIGINS*, which can be set by user input or defined based on the allowed hostnames provided or determined by the installer as a default. (AAP-19319)
+
+* Redirected all {EDAName} traffic to `/eda/` following UI changes that require the redirect. (AAP-18989)
+
+* Fixed the target database for the {EDAName} restore from backup. (AAP-17918)
+
+* Fixed the {ControllerName} URL check when installing {EDAName} without a controller. (AAP-17249)
+
+* Fixed a bug where the membership operator failed in a condition applied to a previously saved event. (AAP-16663)
+
+* Fixed the {EDAName} nginx configuration for a custom HTTPS port. (AAP-16000)
+
+* All {EDAName} services, not only the target service, are now enabled after installation is completed. The {EDAName} services always start after the setup is complete. (AAP-15889)
+
+== {OperatorPlatformNameShort}
+
+* Fixed Django REST Framework (DRF) browsable views. (AAP-25508)
diff --git a/downstream/titles/release-notes/topics/aap-25-known-issues.adoc b/downstream/titles/release-notes/topics/aap-25-known-issues.adoc
new file mode 100644
index 0000000000..aaf1a9e509
--- /dev/null
+++ b/downstream/titles/release-notes/topics/aap-25-known-issues.adoc
@@ -0,0 +1,27 @@
+[[aap-2.5-known-issues]]
+= Known issues
+
+This section provides information about known issues in {PlatformNameShort} 2.5.
+
+== {PlatformNameShort}
+
+* Added the `podman_containers_conf_logs_max_size` variable for *containers.conf* to control the maximum log size for Podman installations. The default value is 10 MiB. (AAP-12295)
+
+* Setting the `pg_host=` value without any other context results in an empty HOST section of the *settings.py* in the {ControllerName}. As a workaround, delete the `pg_host=` value or set it to `pg_host=''`. (AAP-31915)
+
+* Using *Prompt on launch* for variables for job templates, workflow job templates, workflow visualizer nodes, and schedules does not show the default variables when launching the job, or when configuring the workflows and schedules. (AAP-30585)
+
+* The unused *ANSIBLE_BASE_* settings are included as environment variables in the job execution. These variables, suffixed with *SECRET*, are no longer used in the {PlatformNameShort} and can be ignored until they are removed in a future patch. (AAP-32208)
+
+== {EDAName}
+
+* mTLS event stream creation should be disallowed on all installation methods by default. It is currently disallowed on {OCPShort} installations, but not in the containerized or RPM installations. (AAP-31337)
+
+* If a primary Redis node enters a `failed` state and a new primary node is promoted, {EDAName} workers and scheduler are unable to reconnect to the cluster. This causes activations to fail until the containers or pods are recycled. (AAP-30722)
+
+For more information, see the KCS article link:https://access.redhat.com/articles/7088545[Redis failover causes {EDAName} activation failures].
+
+== {AAPRHDH}
+
+* Python VS Code extension v2024.14.1 does not work in OpenShift Dev Spaces version 1.9.3, preventing the Ansible VS Code extension from loading. As a workaround, downgrade the Python VS Code extension to version 2024.12.3.
+
+* The Ansible Content Creator *Get Started* page links do not work in OpenShift Dev Spaces version 1.9.3. As a workaround, use the link:https://code.visualstudio.com/docs/getstarted/userinterface#:~:text=VS%20Code%20is%20equally%20accessible,for%20the%20most%20common%20operations[Ansible VS Code Command Palette] to access the features.
diff --git a/downstream/titles/release-notes/topics/aap-25-removed-features.adoc b/downstream/titles/release-notes/topics/aap-25-removed-features.adoc
new file mode 100644
index 0000000000..101e336f2d
--- /dev/null
+++ b/downstream/titles/release-notes/topics/aap-25-removed-features.adoc
@@ -0,0 +1,69 @@
+[[aap-2.5-removed-features]]
+= Removed features
+
+Removed features are those that were deprecated in earlier releases. They are now removed from the {PlatformNameShort} and are no longer supported.
+
+The following table provides information about features that are removed in {PlatformNameShort} 2.5:
+
+[cols="20%,80%"]
+|===
+| Component | Feature
+
+|{ControllerNameStart}
+|Proxy support for the {ControllerName} has been removed. Load balancers must now point to the {Gateway} instead of the controller.
+
+|ansible-lint
+|Support for the old Ansible `include` task syntax is removed in version 2.16. Update content to use the currently supported Ansible syntax, such as link:https://docs.ansible.com/ansible/latest/collections/ansible/builtin/include_tasks_module.html[include_tasks] or link:https://docs.ansible.com/ansible/latest/collections/ansible/builtin/import_tasks_module.html#ansible-collections-ansible-builtin-import-tasks-module[import_tasks] (see the migration sketch after this table).
+
+|{EDAcontroller}
+|Tokens for the {EDAcontroller} are deprecated. Their configuration has been removed from rulebook activations, and they have been replaced with the {PlatformNameShort} credential type.
+
+|Ansible-core
+|Support for Windows Server versions 2012 and 2012 R2 is removed, as Microsoft's supported end-of-life date is 10 October 2023. These versions of Windows Server are not tested in the {PlatformNameShort} 2.5 release. Red Hat does not guarantee that these features will continue to work as expected in this and future releases.
+
+|Ansible-core
+|In the Action plugin with an *ActionBase* class, the deprecated `_remote_checksum` method is now removed. Use `_execute_remote_stat` instead.
+
+|Ansible-core
+|The deprecated *FileLock* class is now removed. Add your own implementation or rely on third-party support.
+
+|Ansible-core
+|Python 3.9 is now removed as a supported version of the {ControllerName}. Use Python 3.10 or later.
+
+|Ansible-core
+|The `include` module that was deprecated in ansible-core 2.12 is now removed. Use `include_tasks` or `import_tasks` instead.
+
+|Ansible-core
+|`Templar` - The deprecated `shared_loader_obj` parameter of `___init___` is now removed.
+
+|Ansible-core
+|`fetch_url` - Removed auto disabling `decompress` when gzip is not available.
+
+|Ansible-core
+|`inventory_cache` - Removed the deprecated `default.fact_caching_prefix` INI configuration option. Use `defaults.fact_caching_prefix` instead.
+
+|Ansible-core
+|`module_utils/basic.py` - Removed Python 3.5 as a supported remote version. Python version 2.7 or Python version 3.6 or later is now required.
+
+Removed Python versions 2.7 and 3.6 as supported remote versions. Use Python 3.7 or later for target execution.
+
+*NOTE:* This applies to Ansible version 2.17 only.
+
+With the removal of Python 2 support, the `yum` module and `yum` action plug-in are removed and redirected to `dnf`.
+
+|Ansible-core
+|`stat` - Removed the unused `get_md5` parameter.
+
+|Ansible-core
+|Removed the deprecated `JINJA2_NATIVE_WARNING` environment variable.
+
+|Ansible-core
+|Removed the deprecated `scp_if_ssh` from the ssh connection plugin.
+
+|Ansible-core
+|Removed the deprecated `crypt` support from `ansible.utils.encrypt`.
+
+|Execution environment
+|The `python` link is no longer available in the ubi9-based execution environments; only `python3` is. Replace scripts that use `python` or `/bin/python` with `python3` or `/bin/python3`.
+
+|===
\ No newline at end of file
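+The following is a minimal migration sketch for the removed `include` syntax; the task file names are placeholders. `include_tasks` includes a file dynamically at runtime, while `import_tasks` imports it statically at parse time.
+
+[source,yaml]
+----
+# Removed syntax, rejected by current ansible-lint and ansible-core:
+# - include: tasks/configure.yml
+
+# Supported replacements:
+- name: Include tasks dynamically at runtime
+  ansible.builtin.include_tasks: tasks/configure.yml
+
+- name: Import tasks statically at parse time
+  ansible.builtin.import_tasks: tasks/verify.yml
+----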
diff --git a/downstream/titles/release-notes/topics/aap-25.adoc b/downstream/titles/release-notes/topics/aap-25.adoc
new file mode 100644
index 0000000000..2573cd4c7c
--- /dev/null
+++ b/downstream/titles/release-notes/topics/aap-25.adoc
@@ -0,0 +1,184 @@
+// For each release of AAP, make a copy of this file and rename it to aap-rn-xx.adoc where xx is the release number; for example, 24 for the 2.4 release.
+// Save the renamed copy of this file to the release-notes/topics directory where topic files for the release notes reside.
+//Only include release note types that have updates for a given release. For example, if there are no Technology previews for the release, remove that section from this file.
+
+[id="new-features"]
+= New features and enhancements
+
+== Installation changes
+Starting with {PlatformNameShort} 2.5, three different on-premise deployment models are fully tested. In addition to the existing RPM-based installer and operator, support for the containerized installer is being added.
+
+As the platform moves toward a container-first model, the RPM-based installer will be removed in a future release, and a deprecation warning is being issued with the release of {PlatformNameShort} 2.5. While the RPM installer will still be supported for {PlatformNameShort} 2.5 until it is removed, the investment will focus on the {ContainerBase} for RHEL deployments and the {OperatorBase} for OpenShift deployments. Upgrades from 2.4 containerized {PlatformNameShort} Technology Preview to 2.5 containerized {PlatformNameShort} are unsupported at this time.
+
+== Deployment topologies
+Red Hat tests {PlatformNameShort} 2.5 with a defined set of topologies to provide you with opinionated deployment options. While it is possible to install the {PlatformNameShort} on different infrastructure topologies and with different environment configurations, Red Hat guarantees support for the topologies outlined in the following table.
+
+At the time of the {PlatformNameShort} 2.5 GA release, a limited set of topologies is fully tested. Red Hat will regularly add new topologies to iteratively expand the scope of fully tested deployment options. As new topologies roll out, we will include them in the release notes.
+
+The following table shows the tested topologies for {PlatformNameShort} 2.5:
+
+[%autowidth]
+|===
+| Mode | Infrastructure | Description | Tested topologies
+
+|RPM | Virtual Machines/Bare Metal | The RPM installer deploys the {PlatformNameShort} on {RHEL} using RPMs to install the platform on host machines. Customers manage the product and infrastructure lifecycle.
+a|
+* RPM enterprise topology
+* RPM mixed enterprise topology
+
+|Containers | Virtual Machines/Bare Metal | The containerized installer deploys the {PlatformNameShort} on {RHEL} by using Podman, which runs the platform in containers on host machines. Customers manage the product and infrastructure lifecycle.
+a|
+* Container enterprise topology
+* Container growth topology
+
+|Operator | Red Hat OpenShift | The operator uses Red Hat OpenShift operators to deploy the {PlatformNameShort} within Red Hat OpenShift. Customers manage the product and infrastructure lifecycle.
+a|
+* Operator enterprise topology
+* Operator growth topology
+
+|===
+
+For more information, see {LinkTopologies}.
+
+== Unified UI
+In versions before 2.5, the {PlatformNameShort} was split into three primary services: {ControllerName}, {HubName}, and {EDAcontroller}. Each service included a standalone user interface, separate deployment configurations, and separate authentication schemas.
+
+In {PlatformNameShort} 2.5, the {Gateway} is provided as a service that handles authentication and authorization for the {PlatformNameShort}. With the {Gateway}, all services that make up the {PlatformNameShort} are consolidated into a single unified UI. The unified UI provides a single entry into the {PlatformNameShort} and serves the platform user interface to authenticate and access all of the {PlatformNameShort} services from a single location.
+
+=== Terminology changes
+
+The unified UI highlights the functional benefits provided by each underlying service. New UI terminology aligns to earlier names as follows:
+
+* *Automation execution* provides functionality from the *{ControllerName}* service
+* *Automation decisions* provides functionality from the *{EDAName}* service
+* *Automation content* provides functionality from the *{HubName}* service
+
+== {EDAName} functionality (Automation decisions)
+With {PlatformNameShort} 2.5, {EDAName} functionality has been enhanced with the following features:
+
+* Enterprise single sign-on and role-based access control are available through a new {PlatformNameShort} UI, which enables a single point of authentication and access to all functional components as follows:
+** Automation Execution ({ControllerName})
+** Automation Decisions ({EDAName})
+** Automation Content ({HubName})
+** Automation Analytics
+** Access Management
+** {LightspeedShortName}
+
+* Simplified event routing capabilities introduce event streams. Event streams are an easy way to connect your sources to your rulebooks. This new capability lets you create a single endpoint to receive alerts from an event source and then use the events in multiple rulebooks (see the rulebook sketch after this list). This simplifies rulebook activation setup, reduces maintenance demands, and helps lower risk by eliminating the need for additional ports to be open to external traffic.
+
+* {EDAName} in {PlatformNameShort} 2.5 now supports horizontal scalability and enables high-availability deployments of the {EDAController}. These capabilities allow for the installation of multiple {EDAName} nodes, which enables you to create highly available deployments.
+
+* Migration to the new platform-wide {PlatformName} credential type replaces the legacy controller token for enabling rulebook activations to call jobs in the {ControllerName}.
+
+* {EDAName} now has the ability to manage credentials that can be added to rulebook activations. These credentials can be used in rulebooks to authenticate to event sources. In addition, you can now attach vault credentials to rulebook activations so that you can use vaulted variables in rulebooks. Encrypted credentials and vaulted variables enable enterprises to secure the use of {EDAName} within their environment.
+
+* New modules are added to the *ansible.eda* collection to enable users to automate the configuration of the {EDAcontroller} using Ansible playbooks.
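+To make the event stream benefit concrete, the following is a minimal rulebook sketch. The webhook source, port, condition, and job template name are illustrative assumptions; with event streams, the same rulebook can instead consume events from a single platform-managed endpoint without opening an extra inbound port.
+
+[source,yaml]
+----
+# A hedged sketch: the source, condition, and job template name are placeholders.
+- name: Respond to monitoring alerts
+  hosts: all
+  sources:
+    - ansible.eda.webhook:   # an event stream can replace this inbound source
+        host: 0.0.0.0
+        port: 5000
+  rules:
+    - name: Restart the reported service
+      condition: event.payload.alert == "service_down"
+      action:
+        run_job_template:
+          name: restart-service
+          organization: Default
+----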
+
+[id="eda-2.5-with-automation-controller-2.4"]
+== {EDAName} 2.5 with {ControllerName} 2.4
+You can use a newly installed version of {EDAName} from {PlatformNameShort} 2.5 with some existing versions of the {ControllerName}. A hybrid configuration is supported with the following versions:
+
+* 2.4 {PlatformNameShort} version of {ControllerName} (4.4 or 4.5)
+* 2.5 {PlatformNameShort} version of {EDAName} (1.1)
+
+You can only use new installations of {EDAName} in this configuration. RPM-based hybrid deployments are fully supported by Red Hat. For details on setting up this configuration, see the chapter *Installing {EDAController} 1.1 and configuring {ControllerName} 4.4 or 4.5* in the link:{BaseURL}/red_hat_ansible_automation_platform/2.4/html/using_event-driven_ansible_2.5_with_ansible_automation_platform_2.4[Using Event-Driven Ansible 2.5 with Ansible Automation Platform 2.4] guide.
+
+A hybrid configuration means you can install a new {EDAName} service and configure rulebook activations to execute job templates on a 2.4 version of the {ControllerName}.
+
+== {LightspeedShortName} on-premise deployment
+{LightspeedFullName} is a generative AI service that helps automation teams create, adopt, and maintain Ansible content more efficiently; it is now available as an on-premise deployment on the {PlatformNameShort} 2.5.
+
+The on-premise deployment provides the {PlatformNameShort} customers more control over their data and supports compliance with enterprise security policies. For example, organizations in sensitive industries with data privacy or air-gapped requirements can use on-premise deployments of both {LightspeedShortName} and {ibmwatsonxcodeassistant} for {LightspeedShortName} on Cloud Pak for Data. {LightspeedShortName} on-premise deployments are supported on {PlatformNameShort} 2.5. For more information, see the chapter link:https://docs.redhat.com/en/documentation/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant/2.x_latest/html-single/red_hat_ansible_lightspeed_with_ibm_watsonx_code_assistant_user_guide/index#configuring-lightspeed-onpremise_set-up-lightspeed[Setting up {LightspeedShortName} on-premise deployment] in the _{LightspeedFullName} User Guide_.
+
+== {AAPRHDH}
+The {AAPRHDH} deliver an Ansible-first {RHDH} user experience that simplifies creating Ansible content, such as playbooks and collections, for Ansible users of all skill levels. The Ansible plug-ins provide curated content and features to accelerate Ansible learner onboarding and streamline Ansible use case adoption across your organization.
+
+The Ansible plug-ins provide the following capabilities:
+
+* A customized home page and navigation tailored to Ansible users
+* Curated Ansible learning paths to help users new to Ansible
+* Software templates for creating Ansible playbooks and collection projects that follow best practices
+* Links to supported development environments and tools with opinionated configurations
+
+For more information, see the link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/installing_ansible_plug-ins_for_red_hat_developer_hub/rhdh-intro_aap-plugin-rhdh-installing#rhdh-about-plugins_rhdh-intro[{AAPRHDH}] documentation.
+
+== {ToolsName}
+{ToolsName} is a suite of tools provided with the {PlatformNameShort} to help automation creators create, test, and deploy playbook projects, execution environments, and collections on Linux, MacOS, and Windows platforms. Consolidating core Ansible tools into a single package simplifies tool management and promotes recommended practices in the automation content creation experience.
+
+{ToolsName} are distributed in an RPM package for RHEL systems, and in a supported container distribution that can be used on Linux, Mac, and Windows OS.
+
+{ToolsName} comprise the following tools:
+
+* ansible-builder
+* ansible-core
+* ansible-lint
+* ansible-navigator
+* ansible-sign
+* Molecule
+* ansible-creator
+* ansible-dev-environment
+* pytest-ansible
+* tox-ansible
+
+For more information, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/developing_ansible_automation_content/index[Developing Ansible automation content].
+
+== {SaaSonAWS}
+
+{SaaSonAWS} is a deployment of the {PlatformNameShort} control plane purchased through AWS Marketplace. Red{nbsp}Hat manages the service so that customer teams can focus on automation.
+
+For more information, see link:{BaseURL}/ansible_on_clouds/2.x/html-single/red_hat_ansible_automation_platform_service_on_aws/index[{SaaSonAWS}].
+
+== Enhancements
+
+* Added the ability to provide `mounts.conf` or copy it from a local or remote source when installing Podman. (AAP-16214)
+
+* Updated the inventory file to include the SSL key and certificate parameters for provided SSL web certificates. (AAP-13728)
+
+* Added an {PlatformNameShort} operator-version label on Kubernetes resources created by the operator. (AAP-31058)
+
+* Added installation variables to support PostgreSQL certificate authentication for user-provided databases. (AAP-1095)
+
+* Updated NGINX to version 1.22. (AAP-15128)
+
+* Added a new configuration endpoint for the REST API. (AAP-13639)
+
+* Allowed adjustment of *RuntimeDirectorySize* for Podman environments at the time of installation. (AAP-11597)
+
+* Added support for the *SAFE_PLUGINS_FOR_PORT_FORWARD* setting for *eda-server* to the installation program. (AAP-21503)
+
+* Aligned inventory content to tested topologies and added comments for easier access to groups and variables when custom configurations are required. (AAP-30242)
+
+* The variable *`automationedacontroller_allowed_hostnames`* is no longer needed and is no longer supported for {EDAName} installations. (AAP-24421)
+
+* The *eda-server* now opens the ports for a rulebook with a source plugin that requires inbound connections only if that plugin is allowed in the settings. (AAP-17416)
+
+* The {EDAName} settings are now moved to a dedicated YAML file. (AAP-13276)
+
+* Starting with {PlatformNameShort} 2.5, customers using the controller collection (`ansible.controller`) have the platform collection (`ansible.platform`) as a single point of entry, and must use the platform collection to seed organizations, users, and teams (see the playbook sketch after this list). (AAP-31517)
+
+* Users are opted in to {Analytics} by default when they activate {ControllerName} on first-time login. (ANSTRAT-875)
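+The following is a hedged sketch of seeding an organization and a team through the platform collection. The module names and parameters are assumptions based on the collection's stated purpose, and connection details (gateway host and credentials, typically supplied through environment variables or module arguments) are omitted, so verify them against the `ansible.platform` documentation for your installed version.
+
+[source,yaml]
+----
+- name: Seed platform resources through the ansible.platform collection
+  hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: Ensure the organization exists
+      ansible.platform.organization:   # assumed module name
+        name: Example Org
+        state: present
+
+    - name: Ensure the team exists in the organization
+      ansible.platform.team:           # assumed module name
+        name: Example Team
+        organization: Example Org
+        state: present
+----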
+
+////
+THE FOLLOWING IS THE SNIPPET FOR TECH. PREVIEW. ADD THIS SNIPPET IF THERE ARE ANY TECH. PREVIEW FEATURES FOR THE RELEASE. AAP 2.5 HAD NO TECH. PREVIEW FEATURES.
+== Technology Preview
+
+include::../snippets/technology-preview.adoc[]
+
+The following are Technology Preview features:
+
+* Starting with {PlatformNameShort} 2.4, the Platform Resource Operator can be used to create the following resources in {ControllerName} by applying YAML to your OpenShift cluster:
+** Inventories
+** Projects
+** Instance Groups
+** Credentials
+** Schedules
+** Workflow Job Templates
+** Launch Workflows
+
+You can now configure the Controller Access Token for each resource with the `connection_secret` parameter, rather than the `tower_auth_secret` parameter. This change is compatible with earlier versions, but the `tower_auth_secret` parameter is now deprecated and will be removed in a future release.
+
+[role="_additional-resources"]
+.Additional resources
+
+* For the most recent list of Technology Preview features, see link:https://access.redhat.com/articles/ansible-automation-platform-preview-features[Ansible Automation Platform - Preview Features].
+////
\ No newline at end of file
diff --git a/downstream/titles/release-notes/topics/async-updates.adoc b/downstream/titles/release-notes/topics/async-updates.adoc
deleted file mode 100644
index 85b6a76cb0..0000000000
--- a/downstream/titles/release-notes/topics/async-updates.adoc
+++ /dev/null
@@ -1,18 +0,0 @@
-
-= Asynchronous updates
-
-Security, bug fix, and enhancement updates for {PlatformNameShort} {PlatformVers} are released as asynchronous erratas. All {PlatformNameShort} erratas are available on the link:{PlatformDownloadUrl}[Download {PlatformName}] page in the Customer Portal.
-
-As a Red Hat Customer Portal user, you can enable errata notifications in the account settings for Red Hat Subscription Management (RHSM). When errata notifications are enabled, you receive notifications through email whenever new erratas relevant to your registered systems are released.
-
-[NOTE]
-====
-Red Hat Customer Portal user accounts must have systems registered and consuming {PlatformNameShort} entitlements for {PlatformNameShort} errata notification emails to generate.
-====
-
-The Asynchronous updates section of the release notes will be updated over time to give notes on enhancements and bug fixes for asynchronous errata releases of {PlatformNameShort} 2.4.
-
-[role="_additional-resources"]
-.Additional resources
-* For more information about asynchronous errata support in {PlatformNameShort}, see link:https://access.redhat.com/support/policy/updates/ansible-automation-platform[{PlatformName} Life Cycle].
-* For information about Common Vulnerabilities and Exposures (CVEs), see link:https://www.redhat.com/en/topics/security/what-is-cve[What is a CVE?] and link:https://access.redhat.com/security/security-updates/cve[Red Hat CVE Database].
diff --git a/downstream/titles/release-notes/topics/controller-440.adoc b/downstream/titles/release-notes/topics/controller-440.adoc deleted file mode 100644 index 655657574d..0000000000 --- a/downstream/titles/release-notes/topics/controller-440.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// This is the release notes for Automation Controller 4.4, the version number is removed from the topic title as part of the release notes restructuring efforts. - -[[controller-440-intro]] -= {ControllerNameStart} - -{ControllerNameStart} helps teams manage complex multitiered deployments by adding control, knowledge, and delegation to Ansible-powered environments. - -See link:https://docs.ansible.com/automation-controller/latest/html/release-notes/relnotes.html#release-notes-for-4-x[Automation Controller Release Notes for 4.x] for a full list of new features and enhancements. diff --git a/downstream/titles/release-notes/topics/docs-24.adoc b/downstream/titles/release-notes/topics/docs-24.adoc deleted file mode 100644 index eda991403a..0000000000 --- a/downstream/titles/release-notes/topics/docs-24.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// This is the release notes for AAP 2.4 documentation, the version number is removed from the topic title as part of the release notes restructuring efforts. - -[[docs-2.4-intro]] -= {PlatformNameShort} documentation - -{PlatformName} 2.4 documentation includes significant feature updates as well as documentation enhancements and offers an improved user experience. - -.New features and enhancements - -* With the removal of the on-premise component {CatalogName} from {PlatformNameShort} 2.4 onwards, all {CatalogName} documentation is removed from the {PlatformNameShort} 2.4 documentation. - -* The following documents are created to help you install and use {EDAName}, the newest capability of {PlatformNameShort}: - -** link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/getting_started_with_event-driven_ansible_guide/index[Getting Started with Event-Driven Ansible] - -** link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/event-driven_ansible_controller_user_guide/index[Event Driven Ansible User Guide] - -In addition, sections of the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_planning_guide/index[Ansible Automation Platform Planning Guide] -and the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/index[Ansible Automation Platform Installation Guide] are updated to include instructions for planning and installing {EDAName}. - -* The {HubName} documentation has had significant reorganization to combine the content spread across 9 separate documents into the following documents: - -_Getting started with automation hub_:: -Use this guide to perform the initial steps required to use Red Hat {HubName} as the default source for Ansible collections content. - -_Managing content in automation hub_:: -Use this guide to understand how to create and manage collections, content and repositories in {HubName}. - -_Red Hat Ansible Automation Platform Installation Guide_:: -Use this guide to learn how to install {PlatformNameShort} based on supported installation scenarios. 
-
-* The _Managing Red Hat Certified and Ansible Galaxy collections in automation hub guide_ has been moved to the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/managing_content_in_automation_hub/index#managing-cert-valid-content[_Red Hat Certified, validated, and Ansible Galaxy content in automation hub_] topic in the _Managing content in automation hub_ guide.
-
-* The {PlatformNameShort} 2.4 Release Notes are restructured to improve the experience for our customers and the Ansible Community. Users can now view the latest updates based on the {PlatformNameShort} versions, instead of their release timeline.
-
-* The topic link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/managing_content_in_automation_hub/index#repo-management[Repository management with automation hub] is created to help you create and manage custom repositories in {HubName}. This topic is found in the _Managing content in automation hub_ guide.
diff --git a/downstream/titles/release-notes/topics/docs-25.adoc b/downstream/titles/release-notes/topics/docs-25.adoc
new file mode 100644
index 0000000000..b4e99c628a
--- /dev/null
+++ b/downstream/titles/release-notes/topics/docs-25.adoc
@@ -0,0 +1,121 @@
+// This is the release notes for AAP 2.5 documentation, the version number is removed from the topic title as part of the release notes restructuring efforts.
+
+[[docs-2.5-intro]]
+= {PlatformNameShort} documentation
+
+{PlatformName} 2.5 documentation includes significant feature updates as well as documentation enhancements and offers an improved user experience.
+
+The following are documentation enhancements in {PlatformNameShort} 2.5:
+
+* The former _Setting up an {ControllerName} token_ chapter has been deprecated and replaced with the link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html-single/using_automation_decisions/index#eda-set-up-rhaap-credential-type[Setting up a Red Hat Ansible Automation Platform credential] topic. Because the {EDAcontroller} is now integrated with centralized authentication and the Platform UI, this method simplifies the authentication process required for rulebook activations moving forward.
+
+* Documentation changes for 2.5 reflect terminology and product changes. Additionally, we have consolidated content into fewer documents.
++
+The following table summarizes title changes for the 2.5 release.
++
+// Per call with Lynne Maynard on Mon. 23 Sept., the ask is to hold off on adding hyperlinks to the individual doc guides for 30 Sept. release as there have been many updates in the guide names and we don't want broken links issues. This is to be reconsidered in the next update, ie, update 1. Therefore, I have used "title attributes" and not "link attributes" for the guides.
+[cols="2,2"] +|=== +| Version 2.4 document title | Version 2.5 document title + +|{PlatformName} release notes +|Release notes + +|NA +|New: {TitleAnalytics} + +|{PlatformName} planning guide +|{TitlePlanningGuide} + +|Containerized {PlatformNameShort} installation guide (Technology Preview release) +|{TitleContainerizedInstall} (First Generally Available release) + +|Deploying the {PlatformNameShort} operator on {OCPShort} +|{TitleOperatorInstallation} + +a| +* Getting started with {ControllerName} +* Getting started with {HubName} +* Getting started with {EDAName} +|New: {TitleGettingStarted} + +|Installing and configuring central authentication for the {PlatformNameShort} +|{TitleCentralAuth} + +|Getting started with Ansible playbooks +|Getting started with Ansible playbooks + +|{PlatformNameShort} operations guide +|{TitleAAPOperationsGuide} + +|{PlatformNameShort} automation mesh for {OperatorBase} +|{TitleOperatorMesh} + +|{PlatformNameShort} automation mesh for {VMBase} +|{TitleAutomationMesh} + +|Performance considerations for {OperatorBase} +|{TitleOCPPerformanceGuide} + +|{PlatformNameShort} operator backup and recovery guide +|{TitleOperatorBackup} + +|Troubleshooting {PlatformNameShort} +|{TitleTroubleshootingAAP} + +|{PlatformNameShort} hardening guide +|Not available for 2.5 release; to be published at a later date + +|{ControllerName} user guide +|{ControllerUG} + +|{ControllerName} administration guide +|{ControllerAG} + +|{ControllerName} API overview +|{TitleControllerAPIOverview} + +|{ControllerName} API reference +|Automation execution API reference + +|{ControllerName} CLI reference +|Automation execution CLI reference + +|{EDAName} user guide +|{TitleEDAUserGuide} + +|Managing content in {HubName} +| +- Managing automation content + +- Automation content API reference + +|Ansible security automation guide +|Ansible security automation guide + +a| +* Using the automation calculator + +* Viewing reports about your Ansible automation environment + +* Evaluating your automation controller job runs using the job explorer + +* Planning your automation jobs using the automation savings planner +|{TitleAnalytics} + +|{PlatformNameShort} creator guide +|{TitleDevelopAutomationContent} + +|Automation content navigator creator guide +|{TitleNavigatorGuide} + +|Creating and consuming execution environments +|{TitleBuilder} + +|Installing {AAPRHDH} +|{TitlePluginRHDHInstall} + +|Using {AAPRHDH} +|{TitlePluginRHDHUsing} + +|=== diff --git a/downstream/titles/release-notes/topics/eda-24.adoc b/downstream/titles/release-notes/topics/eda-24.adoc deleted file mode 100644 index ad5bb44673..0000000000 --- a/downstream/titles/release-notes/topics/eda-24.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// This is the release notes for Event-Driven Ansible 1.0 for AAP 2.4 release, the version number is removed from the topic title as part of the release notes restructuring efforts. - -[[eda-24-intro]] -= {EDAName} - -{EDAName} is a new way to enhance and expand automation by improving IT speed and agility while enabling consistency and resilience. {EDAName} is designed for simplicity and flexibility. - -.Known issues - -* Both contributor and editor roles cannot set the AWX token. Only users with administrator roles can set the AWX token. - -* Activation-job pods do not have request limits. - -* The onboarding wizard does not request a controller token creation. - -* Users cannot filter through a list of tokens under the *Controller Token* tab. 
- -* Only the users with administrator rights can set or change their passwords. - -* If there is a failure, an activation with restart policy set to `Always` is unable to restart the failed activation. - -* Disabling and enabling an activation causes the restart count to increase by one count. This behavior results in an incorrect `restart` count. - -* You must run Podman pods with memory limits. - -* Users can add multiple tokens even when only the first AWX token is used. - -* A race condition occurs when creating and rapidly deleting an activation causes errors. - -* When users filter any list, only the items that are on the list get filtered. - -* When ongoing activations start multiple jobs, a few jobs are not recorded in the audit logs. - -* When a job template fails, a few key attributes are missing in the event payload. - -* Restart policy in a Kubernetes deployment does not restart successful activations that are marked as failed. - -* An incorrect status is reported for activations that are disabled or enabled. - -* If the `run_job_template` action fails, the rule is not counted as executed. - -* RHEL 9.2 activations cannot connect to the host. - -* Restarting the {EDAName} server can cause activation states to become stale. - -* Bulk deletion of rulebook activation lists is not consistent, and the deletion can be either successful or unsuccessful. - -* When users access the detail screen of a rule audit, the related rulebook activation link is broken. - -* Long running activations with loads of events can cause an out of disk space issue. Resolved in xref:rpm-24-6[installer release 2.4-6]. - -* Certain characters, such as hyphen (-), forward slash (/), and period (.), are not supported in the event keys. Resolved in xref:rpm-24-3[installer release 2.4-3]. - -* When there are more activations than available workers, disabling the activations incorrectly shows them in running state. Resolved in xref:rpm-24-3[installer release 2.4-3]. - -* {EDAName} activation pods are running out of memory on RHEL 9. Resolved in xref:rpm-24-3[installer release 2.4-3]. - -* When all workers are busy with activation processes, other asynchronous tasks are not executed, such as importing projects. Resolved in xref:rpm-24-3[installer release 2.4-3]. \ No newline at end of file diff --git a/downstream/titles/release-notes/topics/hub-464.adoc b/downstream/titles/release-notes/topics/hub-464.adoc deleted file mode 100644 index 8c1faab655..0000000000 --- a/downstream/titles/release-notes/topics/hub-464.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// This is the release notes for Automation Hub 4.6.4, the version number is removed from the topic title as part of the release notes restructuring efforts. - -[[hub-464-intro]] -= {HubNameStart} - -{HubNameStart} enables you to discover and use new certified automation content, such as Ansible Collections, from Red Hat Ansible and Certified Partners. - -.New features and enhancements - -* This release of {HubName} provides repository management functionality. With repository management, you can create, edit, delete, and move content between repositories. - -.Bug fixes - -* Fixed an issue in the collection keyword search which was returning an incorrect number of results. - -* Added the ability to set *OPT_REFERRALS* option for LDAP, so that users can now successfully log in to the {HubName} by using their LDAP credentials. - -* Fixed an error on the UI when *redhat.openshift* collection's core dependency was throwing a `404 Not Found` error. 
- -* Fixed an error such that the deprecated execution environments are now skipped while syncing with `registry.redhat.io`. - - diff --git a/downstream/titles/release-notes/topics/operator-240.adoc b/downstream/titles/release-notes/topics/operator-240.adoc deleted file mode 100644 index 5bc0da4611..0000000000 --- a/downstream/titles/release-notes/topics/operator-240.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// This is the release notes for Automation Platform Operator 2.4, the version number is removed from the topic title as part of the release notes restructuring efforts. - -[[operator-240-intro]] -= Automation Platform Operator - -{OperatorPlatform} provides cloud-native, push-button deployment of new {PlatformNameShort} instances in your OpenShift environment. - -.Bug fixes - -* Enabled configuration of resource requirements for {ControllerName} `init` containers. - -* Added *securityContext* for Event-Driven Ansible Operator deployments to be Pod Security Admission compliant. - -* Resolved error `Controller: Error 413 Entity too large` when doing bulk updates. - -* Ansible token is now obfuscated in YAML job details. - diff --git a/downstream/titles/release-notes/topics/platform-intro.adoc b/downstream/titles/release-notes/topics/platform-intro.adoc index a2baaf62cc..43fc5efb6f 100644 --- a/downstream/titles/release-notes/topics/platform-intro.adoc +++ b/downstream/titles/release-notes/topics/platform-intro.adoc @@ -1,22 +1,23 @@ [[platform-introduction]] = Overview of {PlatformName} -{PlatformName} simplifies the development and operation of automation workloads for managing enterprise application infrastructure lifecycles. -{PlatformNameShort} works across multiple IT domains including operations, networking, security, and development, as well as across diverse hybrid environments. -Simple to adopt, use, and understand, {PlatformNameShort} provides the tools needed to rapidly implement enterprise-wide automation, no matter where you are in your automation journey. +{PlatformName} simplifies the development and operation of automation workloads for managing enterprise application infrastructure lifecycles. {PlatformNameShort} works across multiple IT domains, including operations, networking, security, and development, as well as across diverse hybrid environments. Simple to adopt, use, and understand, {PlatformNameShort} provides the tools needed to rapidly implement enterprise-wide automation, no matter where you are in your automation journey. [[whats-included]] -== What is included in {PlatformNameShort} +== What is included in the {PlatformNameShort} -[cols="a,a,a,a,a"] +[%header, %autowidth] |=== -| {PlatformNameShort} | {ControllerNameStart} | {HubNameStart} | {EDAcontroller} | {InsightsShort} +| {PlatformNameShort} | {ControllerNameStart} | {HubNameStart} | {EDAcontroller} | {InsightsShort} | {GatewayStart} + +(Unified UI) -|2.4 | 4.4| -* 4.7 +|2.5 | 4.6.0 +a| +* 4.10.0 * hosted service| -1.0 +1.1.0 | hosted service +| 1.1 |=== @@ -24,13 +25,3 @@ Simple to adopt, use, and understand, {PlatformNameShort} provides the tools nee Red Hat provides different levels of maintenance for each {PlatformNameShort} release. For more information, see link:https://access.redhat.com/support/policy/updates/ansible-automation-platform[{PlatformName} Life Cycle]. -== Upgrading {PlatformNameShort} - -When upgrading, do not use `yum update`. Use the installation program instead. 
The installation program performs all of the necessary actions required to upgrade to the latest versions of {PlatformNameShort}, including {ControllerName} and {PrivateHubName}. - -.Additional resources -* For information about the components included in {PlatformNameShort}, see the table in xref:whats-included[What is included in {PlatformNameShort}]. - -* For more information about upgrading {PlatformNameShort}, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_upgrade_and_migration_guide/index[{PlatformName} upgrade and migration guide]. - -* For procedures related to using the {PlatformNameShort} installer, see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html/red_hat_ansible_automation_platform_installation_guide/index[{PlatformNameShort} installation guide]. diff --git a/downstream/titles/release-notes/topics/rpm-version-table.adoc b/downstream/titles/release-notes/topics/rpm-version-table.adoc deleted file mode 100644 index d07ee09e7d..0000000000 --- a/downstream/titles/release-notes/topics/rpm-version-table.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// This table contains the component/package versions per each errata advisory - -.Component versions per errata advisory -//cols="a,a" formats the columns as AsciiDoc allowing for AsciiDoc syntax -[cols="2a,3a", options="header"] -|=== -| Errata advisory | Component versions - -| xref:rpm-24-7[RHSA-2024:3781] + -June 10, 2024 | -* `ansible-automation-platform-installer` 2.4-7 -* `ansible-automation-platform-setup` 2.4-7 -* `ansible-core` 2.15.11 -* {ControllerNameStart} 4.5.7 -* {HubNameStart} 4.9.2 -* {EDAName} 1.0.7 - -|=== \ No newline at end of file diff --git a/downstream/titles/release-notes/topics/tech-preview.adoc b/downstream/titles/release-notes/topics/tech-preview.adoc new file mode 100644 index 0000000000..371ad13a0c --- /dev/null +++ b/downstream/titles/release-notes/topics/tech-preview.adoc @@ -0,0 +1,24 @@ +[[tech-preview]] += Technology preview + +include::../snippets/technology-preview.adoc[] + +The following are Technology Preview features: + +* Starting with {PlatformNameShort} 2.4, the Platform Resource Operator can be used to create the following resources in {ControllerName} by applying YAML to your OpenShift cluster (see the example sketch after this topic): +** Inventories +** Projects +** Instance Groups +** Credentials +** Schedules +** Workflow Job Templates +** Launch Workflows + +You can now configure the Controller Access Token for each resource with the `connection_secret` parameter, rather than the `tower_auth_secret` parameter. This change is compatible with earlier versions, but the `tower_auth_secret` parameter is now deprecated and will be removed in a future release. + +[role="_additional-resources"] +.Additional resources + +* For the most recent list of Technology Preview features, see link:https://access.redhat.com/articles/ansible-automation-platform-preview-features[Ansible Automation Platform - Preview Features]. + +* For information about execution node enhancements on OpenShift deployments, see link:https://docs.ansible.com/automation-controller/latest/html/administration/instances.html[Managing Capacity With Instances]. 
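For illustration, creating one of these resources by applying YAML might look like the following minimal sketch. Only the `connection_secret` parameter comes from the notes above; the `apiVersion`, `kind`, and the remaining `spec` fields are illustrative assumptions, so check the Platform Resource Operator CRDs in your cluster for the exact schema.

[source,bash]
----
# Hedged sketch: apply a Platform Resource Operator resource as YAML.
# Everything except connection_secret is an assumed, illustrative name.
oc apply -f - <<'EOF'
apiVersion: tower.ansible.com/v1alpha1   # assumed API group/version
kind: AnsibleProject                     # assumed resource kind
metadata:
  name: demo-project
  namespace: aap
spec:
  # connection_secret supersedes the deprecated tower_auth_secret and
  # names a Secret that holds the controller access token.
  connection_secret: controller-access-token
  name: Demo Project
  scm_type: git
  scm_url: https://github.com/example/playbooks.git
EOF
----

If the schema matches, the operator reconciles the applied resource into a project in {ControllerName} by using the referenced access token.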
diff --git a/downstream/titles/security-guide/docinfo.xml b/downstream/titles/security-guide/docinfo.xml index 9f1ab257c2..fb847664c8 100644 --- a/downstream/titles/security-guide/docinfo.xml +++ b/downstream/titles/security-guide/docinfo.xml @@ -1,4 +1,4 @@ -Red Hat Ansible security automation guide +Implementing security automation Red Hat Ansible Automation Platform 2.5 Identify and manage security events using Ansible diff --git a/downstream/titles/security-guide/master.adoc b/downstream/titles/security-guide/master.adoc index 9187edaa44..2c0bf13757 100644 --- a/downstream/titles/security-guide/master.adoc +++ b/downstream/titles/security-guide/master.adoc @@ -7,7 +7,7 @@ include::attributes/attributes.adoc[] // Book Title -= Red Hat Ansible security automation guide += Implementing security automation include::{Boilerplate}[] diff --git a/downstream/titles/topologies/aap-common b/downstream/titles/topologies/aap-common new file mode 120000 index 0000000000..472eeb4dac --- /dev/null +++ b/downstream/titles/topologies/aap-common @@ -0,0 +1 @@ +../../aap-common \ No newline at end of file diff --git a/downstream/titles/topologies/attributes b/downstream/titles/topologies/attributes new file mode 120000 index 0000000000..a5caaa73a5 --- /dev/null +++ b/downstream/titles/topologies/attributes @@ -0,0 +1 @@ +../../attributes \ No newline at end of file diff --git a/downstream/titles/topologies/docinfo.xml b/downstream/titles/topologies/docinfo.xml new file mode 100644 index 0000000000..ccbc895225 --- /dev/null +++ b/downstream/titles/topologies/docinfo.xml @@ -0,0 +1,13 @@ +Tested deployment models +Red Hat Ansible Automation Platform +2.5 +Plan your deployment of Ansible Automation Platform + + +This guide provides the Red Hat tested and supported topologies for Red Hat Ansible Automation Platform. 
+ + + + Red Hat Customer Content Services + + diff --git a/downstream/titles/topologies/images b/downstream/titles/topologies/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/downstream/titles/topologies/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/downstream/titles/topologies/master.adoc b/downstream/titles/topologies/master.adoc new file mode 100644 index 0000000000..9b3ab3721c --- /dev/null +++ b/downstream/titles/topologies/master.adoc @@ -0,0 +1,28 @@ +:imagesdir: images +:toclevels: 4 +:context: topologies +include::attributes/attributes.adoc[] + +// Book Title + += Tested deployment models + +include::{Boilerplate}[] + +include::topologies/assembly-overview-tested-deployment-models.adoc[leveloffset=+1] + +//RPM topologies +include::topologies/assembly-rpm-topologies.adoc[leveloffset=+1] + +//Container topologies +include::topologies/assembly-container-topologies.adoc[leveloffset=+1] + +//Operator topologies +include::topologies/assembly-ocp-topologies.adoc[leveloffset=+1] + +//Automation mesh nodes +include::topologies/topologies/ref-mesh-nodes.adoc[leveloffset=+1] + +//Additional resources appendix +[appendix] +include::topologies/assembly-appendix-topology-resources.adoc[leveloffset=+1] diff --git a/downstream/titles/topologies/topologies b/downstream/titles/topologies/topologies new file mode 120000 index 0000000000..760101fd3c --- /dev/null +++ b/downstream/titles/topologies/topologies @@ -0,0 +1 @@ +../../assemblies/topologies \ No newline at end of file diff --git a/downstream/titles/troubleshooting-aap/master.adoc b/downstream/titles/troubleshooting-aap/master.adoc index 8f717f7564..0fd5eeedfb 100644 --- a/downstream/titles/troubleshooting-aap/master.adoc +++ b/downstream/titles/troubleshooting-aap/master.adoc @@ -18,7 +18,10 @@ include::troubleshooting-aap/assembly-troubleshoot-backup-recovery.adoc[leveloff include::troubleshooting-aap/assembly-troubleshoot-execution-environments.adoc[leveloffset=+1] include::troubleshooting-aap/assembly-troubleshoot-installation.adoc[leveloffset=+1] include::troubleshooting-aap/assembly-troubleshoot-jobs.adoc[leveloffset=+1] -include::troubleshooting-aap/assembly-troubleshoot-login.adoc[leveloffset=+1] +// Michelle - commenting out as it refers to controller UI which should no longer be accessed +//include::troubleshooting-aap/assembly-troubleshoot-login.adoc[leveloffset=+1] include::troubleshooting-aap/assembly-troubleshoot-networking.adoc[leveloffset=+1] include::troubleshooting-aap/assembly-troubleshoot-playbooks.adoc[leveloffset=+1] -include::troubleshooting-aap/assembly-troubleshoot-subscriptions.adoc[leveloffset=+1] \ No newline at end of file +include::troubleshooting-aap/assembly-troubleshoot-upgrade.adoc[leveloffset=+1] +// Michelle - commenting out for now as this content doesn't appear to exist anymore in a published doc +//include::troubleshooting-aap/assembly-troubleshoot-subscriptions.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/titles/updating-aap/aap-common b/downstream/titles/updating-aap/aap-common new file mode 120000 index 0000000000..472eeb4dac --- /dev/null +++ b/downstream/titles/updating-aap/aap-common @@ -0,0 +1 @@ +../../aap-common \ No newline at end of file diff --git a/downstream/titles/updating-aap/attributes b/downstream/titles/updating-aap/attributes new file mode 120000 index 0000000000..a5caaa73a5 --- /dev/null +++ b/downstream/titles/updating-aap/attributes @@ -0,0 +1 @@ +../../attributes \ No newline at end of file diff 
--git a/downstream/titles/updating-aap/docinfo.xml b/downstream/titles/updating-aap/docinfo.xml new file mode 100644 index 0000000000..4b4908c021 --- /dev/null +++ b/downstream/titles/updating-aap/docinfo.xml @@ -0,0 +1,13 @@ +Updating from Ansible Automation Platform 2.5 to 2.5.x +Red Hat Ansible Automation Platform +2.5 +Perform a patch update from Ansible Automation Platform 2.5 to 2.5.x + + +This guide shows how to perform a patch update from Ansible Automation Platform 2.5 to 2.5.x for each installation type. + + + + Red Hat Customer Content Services + + diff --git a/downstream/titles/updating-aap/images b/downstream/titles/updating-aap/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/downstream/titles/updating-aap/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/downstream/titles/updating-aap/master.adoc b/downstream/titles/updating-aap/master.adoc new file mode 100644 index 0000000000..27bb46df48 --- /dev/null +++ b/downstream/titles/updating-aap/master.adoc @@ -0,0 +1,24 @@ +:imagesdir: images +:toclevels: 4 +:experimental: + +:context: updating-aap + +include::attributes/attributes.adoc[] + +// Book Title + += Updating from Ansible Automation Platform 2.5 to 2.5.x + +include::{Boilerplate}[] + +You can perform patch updates to your {PlatformNameShort} installation as updates are released. This only applies to updates from 2.5 to 2.5.x. + +[NOTE] +==== +Upgrades from 2.4 to 2.5 are unsupported at this time. For more information, see this link:https://access.redhat.com/solutions/7089196[Knowledgebase article]. +==== + +include::platform/assembly-update-rpm.adoc[leveloffset=+1] +include::platform/assembly-update-container.adoc[leveloffset=+1] +// [hherbly]: moved to Installing on OCP guide per AAP-34122 include::platform/assembly-update-ocp.adoc[leveloffset=+1] \ No newline at end of file diff --git a/downstream/titles/updating-aap/platform b/downstream/titles/updating-aap/platform new file mode 120000 index 0000000000..06b49528ee --- /dev/null +++ b/downstream/titles/updating-aap/platform @@ -0,0 +1 @@ +../../assemblies/platform \ No newline at end of file diff --git a/downstream/titles/upgrade/docinfo.xml b/downstream/titles/upgrade/docinfo.xml index 01d64bfa69..0def8006be 100644 --- a/downstream/titles/upgrade/docinfo.xml +++ b/downstream/titles/upgrade/docinfo.xml @@ -1,4 +1,4 @@ -Red Hat Ansible Automation Platform upgrade and migration guide +RPM upgrade and migration Red Hat Ansible Automation Platform 2.5 Upgrade and migrate legacy deployments of Ansible Automation Platform diff --git a/downstream/titles/upgrade/master.adoc b/downstream/titles/upgrade/master.adoc index 99be853bab..debb660b71 100644 --- a/downstream/titles/upgrade/master.adoc +++ b/downstream/titles/upgrade/master.adoc @@ -6,13 +6,14 @@ include::attributes/attributes.adoc[] // Book Title -= Red Hat Ansible Automation Platform upgrade and migration guide += RPM upgrade and migration include::{Boilerplate}[] include::platform/assembly-aap-upgrades.adoc[leveloffset=+1] include::platform/assembly-aap-upgrading-platform.adoc[leveloffset=+1] -include::platform/assembly-migrate-legacy-venv-to-ee.adoc[leveloffset=+1] -include::platform/assembly-migrate-isolated-execution-nodes.adoc[leveloffset=+1] -include::platform/assembly-content-migration.adoc[leveloffset=+1] -include::platform/assembly-converting-playbooks-for-aap2.adoc[leveloffset=+1] +include::platform/assembly-aap-post-upgrade.adoc[leveloffset=+1] +// 
include::platform/assembly-migrate-legacy-venv-to-ee.adoc[leveloffset=+1] +// include::platform/assembly-migrate-isolated-execution-nodes.adoc[leveloffset=+1] +// include::platform/assembly-content-migration.adoc[leveloffset=+1] +// include::platform/assembly-converting-playbooks-for-aap2.adoc[leveloffset=+1]
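The updating and upgrade titles above both steer readers toward the installation program rather than `yum update`. As a rough companion to that guidance, the following is a minimal sketch of an RPM patch-update run; the bundle filename, directory, and inventory path are placeholders, not confirmed artifact names.

[source,bash]
----
#!/bin/bash
# Hedged sketch of an RPM patch update from 2.5 to 2.5.x.
# The bundle filename and inventory path are placeholders.
tar xf ansible-automation-platform-setup-bundle-2.5-X.tar.gz
cd ansible-automation-platform-setup-bundle-2.5-X
# Reuse the inventory from the original installation and run the
# installer; do not run `yum update` on platform components directly.
./setup.sh -i inventory
----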