# Skip to content
#
# Issue Monster
#
# Issue Monster #4648
#
# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"9329c2054bd71919cd82788b2a41b4d98fc079188e3dd7c0ebc46b6bbc534f07","strict":true,"agent_id":"copilot","agent_model":"claude-haiku-4.5"}
# gh-aw-manifest: {"version":1,"secrets":["COPILOT_GITHUB_TOKEN","GH_AW_AGENT_TOKEN","GH_AW_GITHUB_MCP_SERVER_TOKEN","GH_AW_GITHUB_TOKEN","GITHUB_TOKEN"],"actions":[{"repo":"actions/checkout","sha":"de0fac2e4500dabe0009e67214ff5f5447ce83dd","version":"v6.0.2"},{"repo":"actions/download-artifact","sha":"3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c","version":"v8.0.1"},{"repo":"actions/github-script","sha":"3a2844b7e9c422d3c10d287c895573f7108da1b3","version":"v9"},{"repo":"actions/setup-node","sha":"48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e","version":"v6.4.0"},{"repo":"actions/upload-artifact","sha":"043fb46d1a93c77aae656e7c1c64a875d1fc6a0a","version":"v7.0.1"}],"containers":[{"image":"ghcr.io/github/gh-aw-firewall/agent:0.25.35"},{"image":"ghcr.io/github/gh-aw-firewall/api-proxy:0.25.35"},{"image":"ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.35"},{"image":"ghcr.io/github/gh-aw-firewall/squid:0.25.35"},{"image":"ghcr.io/github/gh-aw-mcpg:v0.3.3"},{"image":"ghcr.io/github/github-mcp-server:v1.0.3","digest":"sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959","pinned_image":"ghcr.io/github/github-mcp-server:v1.0.3@sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959"},{"image":"node:lts-alpine","digest":"sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f","pinned_image":"node:lts-alpine@sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f"}]}
# ___ _ _
# / _ \ | | (_)
# | |_| | __ _ ___ _ __ | |_ _ ___
# | _ |/ _` |/ _ \ '_ \| __| |/ __|
# | | | | (_| | __/ | | | |_| | (__
# \_| |_/\__, |\___|_| |_|\__|_|\___|
# __/ |
# _ _ |___/
# | | | | / _| |
# | | | | ___ _ __ _ __| |_| | _____ ____
# | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___|
# \ /\ / (_) | | | | ( | | | | (_) \ V V /\__ \
# \/ \/ \___/|_| |_|\_\|_| |_|\___/ \_/\_/ |___/
#
# This file was automatically generated by gh-aw. DO NOT EDIT.
#
# To update this file, edit the corresponding .md file and run:
# gh aw compile
# Not all edits will cause changes to this file.
#
# For more information: https://github.github.com/gh-aw/introduction/overview/
#
# The Cookie Monster of issues - assigns issues to Copilot coding agent one at a time
#
# Resolved workflow manifest:
# Imports:
# - shared/activation-app.md
# - shared/github-guard-policy.md
#
# Secrets used:
# - COPILOT_GITHUB_TOKEN
# - GH_AW_AGENT_TOKEN
# - GH_AW_GITHUB_MCP_SERVER_TOKEN
# - GH_AW_GITHUB_TOKEN
# - GITHUB_TOKEN
#
# Custom actions used:
# - actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
# - actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
# - actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9
# - actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
# - actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0
# - actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
#
# Container images used:
# - ghcr.io/github/gh-aw-firewall/agent:0.25.35
# - ghcr.io/github/gh-aw-firewall/api-proxy:0.25.35
# - ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.35
# - ghcr.io/github/gh-aw-firewall/squid:0.25.35
# - ghcr.io/github/gh-aw-mcpg:v0.3.3
# - ghcr.io/github/github-mcp-server:v1.0.3@sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959
# - node:lts-alpine@sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f
name: "Issue Monster"
"on":
# permissions: # Permissions applied to pre-activation job
# issues: read
# pull-requests: read
schedule:
- cron: "*/30 * * * *"
# Friendly format: every 30m
# skip-if-check-failing: # Skip-if-check-failing processed as check status gate in pre-activation job
# allow-pending: true
# include:
# - build
# - test
# - lint-go
# - lint-js
# skip-if-match: # Skip-if-match processed as search check in pre-activation job
# max: 5
# query: is:pr is:open is:draft author:app/copilot-swe-agent
# skip-if-no-match: is:issue is:open # Skip-if-no-match processed as search check in pre-activation job
# steps: # Steps injected into pre-activation job
# - id: search
# name: Search for candidate issues
# uses: actions/github-script@v9
# with:
# script: |
# const { owner, repo } = context.repo;
# const MAX_ISSUES_WITH_BODY_CONTEXT = 8;
# const BODY_SNIPPET_MAX_LENGTH = 600;
#
# try {
# // Check for recent rate-limited PRs to avoid scheduling more work during rate limiting
# core.info('Checking for recent rate-limited PRs...');
# const rateLimitCheckDate = new Date();
# rateLimitCheckDate.setHours(rateLimitCheckDate.getHours() - 1); // Check last hour
# // Format as YYYY-MM-DDTHH:MM:SS for GitHub search API
# const rateLimitCheckISO = rateLimitCheckDate.toISOString().split('.')[0] + 'Z';
#
# const recentPRsQuery = `is:pr author:app/copilot-swe-agent created:>${rateLimitCheckISO} repo:${owner}/${repo}`;
# const recentPRsResponse = await github.rest.search.issuesAndPullRequests({
# q: recentPRsQuery,
# per_page: 10,
# sort: 'created',
# order: 'desc'
# });
#
# core.info(`Found ${recentPRsResponse.data.total_count} recent Copilot PRs to check for rate limiting`);
#
# // Check if any recent PRs have rate limit indicators
# let rateLimitDetected = false;
# for (const pr of recentPRsResponse.data.items) {
# try {
# const prTimelineQuery = `
# query($owner: String!, $repo: String!, $number: Int!) {
# repository(owner: $owner, name: $repo) {
# pullRequest(number: $number) {
# timelineItems(first: 50, itemTypes: [ISSUE_COMMENT]) {
# nodes {
# __typename
# ... on IssueComment {
# body
# createdAt
# }
# }
# }
# }
# }
# }
# `;
#
# const prTimelineResult = await github.graphql(prTimelineQuery, {
# owner,
# repo,
# number: pr.number
# });
#
# const comments = prTimelineResult?.repository?.pullRequest?.timelineItems?.nodes || [];
# const rateLimitPattern = /rate limit|API rate limit|secondary rate limit|abuse detection|429|too many requests/i;
#
# for (const comment of comments) {
# if (comment.body && rateLimitPattern.test(comment.body)) {
# core.warning(`Rate limiting detected in PR #${pr.number}: ${comment.body.substring(0, 200)}`);
# rateLimitDetected = true;
# break;
# }
# }
#
# if (rateLimitDetected) break;
# } catch (error) {
# core.warning(`Could not check PR #${pr.number} for rate limiting: ${error.message}`);
# }
# }
#
# if (rateLimitDetected) {
# core.warning('🛑 Rate limiting detected in recent PRs. Skipping issue assignment to prevent further rate limit issues.');
# core.setOutput('issue_count', 0);
# core.setOutput('issue_numbers', '');
# core.setOutput('issue_list', '');
# core.setOutput('issue_context', '');
# core.setOutput('has_issues', 'false');
# return;
# }
#
# core.info('✓ No rate limiting detected. Proceeding with issue search.');
#
# // Labels that indicate an issue should NOT be auto-assigned
# const excludeLabels = [
# 'wontfix',
# 'duplicate',
# 'invalid',
# 'question',
# 'discussion',
# 'needs-discussion',
# 'blocked',
# 'on-hold',
# 'waiting-for-feedback',
# 'needs-more-info',
# 'no-bot',
# 'no-campaign'
# ];
#
# // Labels that indicate an issue is a GOOD candidate for auto-assignment
# const priorityLabels = [
# 'good first issue',
# 'good-first-issue',
# 'bug',
# 'enhancement',
# 'feature',
# 'documentation',
# 'tech-debt',
# 'refactoring',
# 'performance',
# 'security'
# ];
#
# // Search for open issues with "cookie" label and without excluded labels
# // The "cookie" label indicates issues that are approved work queue items from automated workflows
# const query = `is:issue is:open repo:${owner}/${repo} label:cookie -label:"${excludeLabels.join('" -label:"')}"`;
# core.info(`Searching: ${query}`);
# const response = await github.rest.search.issuesAndPullRequests({
# q: query,
# per_page: 100,
# sort: 'created',
# order: 'desc'
# });
# core.info(`Found ${response.data.total_count} total issues matching basic criteria`);
#
# // Fetch full details for each issue to get labels, assignees, sub-issues, and linked PRs
# // Track integrity-filtered issues to emit a diagnostic summary
# const integrityFilteredIssues = [];
# const issuesWithDetails = (await Promise.all(
# response.data.items.map(async (issue) => {
# // Fetch full issue details — some issues may be blocked by integrity policy
# let fullIssue;
# try {
# fullIssue = await github.rest.issues.get({
# owner,
# repo,
# issue_number: issue.number
# });
# } catch (fetchError) {
# // Integrity-filtered issues (403/451) or other transient errors should be
# // skipped individually rather than failing the entire batch
# const status = fetchError.status || fetchError.response?.status;
# // 403 = Forbidden (integrity policy), 451 = Unavailable For Legal Reasons
# const isIntegrityBlock = status === 403 || status === 451 ||
# /\bintegrity\b/i.test(fetchError.message || '');
# const errorSummary = (fetchError.message || String(fetchError)).slice(0, 120);
# if (isIntegrityBlock) {
# integrityFilteredIssues.push(issue.number);
# core.warning(`⚠️ Skipping issue #${issue.number}: blocked by integrity policy (HTTP ${status || 'unknown'}): ${errorSummary}`);
# } else {
# core.warning(`⚠️ Skipping issue #${issue.number}: could not fetch details (HTTP ${status || 'unknown'}): ${errorSummary}`);
# }
# return null;
# }
#
# // Check if this issue has sub-issues and linked PRs using GraphQL
# let subIssuesCount = 0;
# let linkedPRs = [];
# try {
# const issueDetailsQuery = `
# query($owner: String!, $repo: String!, $number: Int!) {
# repository(owner: $owner, name: $repo) {
# issue(number: $number) {
# subIssues {
# totalCount
# }
# timelineItems(first: 100, itemTypes: [CROSS_REFERENCED_EVENT]) {
# nodes {
# ... on CrossReferencedEvent {
# source {
# __typename
# ... on PullRequest {
# number
# state
# isDraft
# author {
# login
# }
# }
# }
# }
# }
# }
# }
# }
# }
# `;
# const issueDetailsResult = await github.graphql(issueDetailsQuery, {
# owner,
# repo,
# number: issue.number
# });
#
# subIssuesCount = issueDetailsResult?.repository?.issue?.subIssues?.totalCount || 0;
#
# // Extract linked PRs from timeline
# const timelineItems = issueDetailsResult?.repository?.issue?.timelineItems?.nodes || [];
# linkedPRs = timelineItems
# .filter(item => item?.source?.__typename === 'PullRequest')
# .map(item => ({
# number: item.source.number,
# state: item.source.state,
# isDraft: item.source.isDraft,
# author: item.source.author?.login
# }));
#
# core.info(`Issue #${issue.number} has ${linkedPRs.length} linked PR(s)`);
# } catch (error) {
# // If GraphQL query fails, continue with defaults
# core.warning(`Could not check details for #${issue.number}: ${error.message}`);
# }
#
# return {
# ...fullIssue.data,
# subIssuesCount,
# linkedPRs
# };
# })
# )).filter(Boolean); // Remove null entries (integrity-filtered or otherwise skipped)
#
# // Emit diagnostic summary for integrity-filtered issues
# if (integrityFilteredIssues.length > 0) {
# core.warning(`🛡️ Integrity filter diagnostic: ${integrityFilteredIssues.length} issue(s) were skipped due to integrity policy: #${integrityFilteredIssues.join(', #')}. These issues will be excluded from this run.`);
# }
#
# // Filter and score issues
# const scoredIssues = issuesWithDetails
# .filter(issue => {
# // Exclude issues that already have assignees
# if (issue.assignees && issue.assignees.length > 0) {
# core.info(`Skipping #${issue.number}: already has assignees`);
# return false;
# }
#
# // Exclude issues with excluded labels (double check)
# const issueLabels = issue.labels.map(l => l.name.toLowerCase());
# if (issueLabels.some(label => excludeLabels.map(l => l.toLowerCase()).includes(label))) {
# core.info(`Skipping #${issue.number}: has excluded label`);
# return false;
# }
#
# // Exclude issues with campaign labels (campaign:*)
# // Campaign items are managed by campaign orchestrators
# if (issueLabels.some(label => label.startsWith('campaign:'))) {
# core.info(`Skipping #${issue.number}: has campaign label (managed by campaign orchestrator)`);
# return false;
# }
#
# // Exclude issues that have sub-issues (parent/organizing issues)
# if (issue.subIssuesCount > 0) {
# core.info(`Skipping #${issue.number}: has ${issue.subIssuesCount} sub-issue(s) - parent issues are used for organizing, not tasks`);
# return false;
# }
#
# // Exclude issues with closed PRs (treat as complete)
# const closedPRs = issue.linkedPRs?.filter(pr => pr.state === 'CLOSED' || pr.state === 'MERGED') || [];
# if (closedPRs.length > 0) {
# core.info(`Skipping #${issue.number}: has ${closedPRs.length} closed/merged PR(s) - treating as complete`);
# return false;
# }
#
# // Exclude issues with open PRs from Copilot coding agent
# const openCopilotPRs = issue.linkedPRs?.filter(pr =>
# pr.state === 'OPEN' &&
# (pr.author === 'copilot-swe-agent' || pr.author?.includes('copilot'))
# ) || [];
# if (openCopilotPRs.length > 0) {
# core.info(`Skipping #${issue.number}: has ${openCopilotPRs.length} open PR(s) from Copilot - already being worked on`);
# return false;
# }
#
# return true;
# })
# .map(issue => {
# const issueLabels = issue.labels.map(l => l.name.toLowerCase());
# let score = 0;
#
# // Score based on priority labels (higher score = higher priority)
# if (issueLabels.includes('good first issue') || issueLabels.includes('good-first-issue')) {
# score += 50;
# }
# if (issueLabels.includes('bug')) {
# score += 40;
# }
# if (issueLabels.includes('security')) {
# score += 45;
# }
# if (issueLabels.includes('documentation')) {
# score += 35;
# }
# if (issueLabels.includes('enhancement') || issueLabels.includes('feature')) {
# score += 30;
# }
# if (issueLabels.includes('performance')) {
# score += 25;
# }
# if (issueLabels.includes('tech-debt') || issueLabels.includes('refactoring')) {
# score += 20;
# }
#
# // Bonus for issues with clear labels (any priority label)
# if (issueLabels.some(label => priorityLabels.map(l => l.toLowerCase()).includes(label))) {
# score += 10;
# }
#
# // Age bonus: older issues get slight priority (days old / 10)
# const ageInDays = Math.floor((Date.now() - new Date(issue.created_at)) / (1000 * 60 * 60 * 24));
# score += Math.min(ageInDays / 10, 20); // Cap age bonus at 20 points
#
# return {
# number: issue.number,
# title: issue.title,
# labels: issue.labels.map(l => l.name),
# body: issue.body,
# created_at: issue.created_at,
# score
# };
# })
# .sort((a, b) => b.score - a.score); // Sort by score descending
#
# // Format output
# const issueList = scoredIssues.map(i => {
# const labelStr = i.labels.length > 0 ? ` [${i.labels.join(', ')}]` : '';
# return `#${i.number}: ${i.title}${labelStr} (score: ${i.score.toFixed(1)})`;
# }).join('\n');
#
# // Pre-fetch compact body context for top candidates so the agent can
# // triage without extra reads in most runs.
# const issueContext = scoredIssues.slice(0, MAX_ISSUES_WITH_BODY_CONTEXT).map(i => {
# const body = (i.body || '').replace(/\s+/g, ' ').trim();
# const bodySnippet = body.length > BODY_SNIPPET_MAX_LENGTH ? `${body.slice(0, BODY_SNIPPET_MAX_LENGTH)}…` : body;
# const labelStr = i.labels.length > 0 ? i.labels.join(', ') : 'none';
# return `#${i.number} | score=${i.score.toFixed(1)} | labels=${labelStr}\nTitle: ${i.title}\nBody: ${bodySnippet || '(no body)'}`;
# }).join('\n\n---\n\n');
#
# const issueNumbers = scoredIssues.map(i => i.number).join(',');
#
# core.info(`Total candidate issues after filtering: ${scoredIssues.length}`);
# if (scoredIssues.length > 0) {
# core.info(`Top candidates:\n${issueList.split('\n').slice(0, 10).join('\n')}`);
# }
#
# core.setOutput('issue_count', scoredIssues.length);
# core.setOutput('issue_numbers', issueNumbers);
# core.setOutput('issue_list', issueList);
# core.setOutput('issue_context', issueContext);
#
# if (scoredIssues.length === 0) {
# core.info('🍽️ No suitable candidate issues - the plate is empty!');
# core.setOutput('has_issues', 'false');
# } else {
# core.setOutput('has_issues', 'true');
# }
# } catch (error) {
# core.error(`Error searching for issues: ${error.message}`);
# core.setOutput('issue_count', 0);
# core.setOutput('issue_numbers', '');
# core.setOutput('issue_list', '');
# core.setOutput('issue_context', '');
# core.setOutput('has_issues', 'false');
# }
workflow_dispatch:
inputs:
aw_context:
default: ""
description: Agent caller context (used internally by Agentic Workflows).
required: false
type: string
permissions: {}
concurrency:
group: "gh-aw-${{ github.workflow }}-${{ github.event.issue.number || github.event.discussion.number || github.run_id }}"
run-name: "Issue Monster"
jobs:
activation:
needs: pre_activation
if: needs.pre_activation.outputs.activated == 'true' && (needs.pre_activation.outputs.has_issues == 'true')
runs-on: ubuntu-slim
permissions:
actions: read
contents: read
outputs:
comment_id: ""
comment_repo: ""
engine_id: ${{ steps.generate_aw_info.outputs.engine_id }}
lockdown_check_failed: ${{ steps.generate_aw_info.outputs.lockdown_check_failed == 'true' }}
model: ${{ steps.generate_aw_info.outputs.model }}
secret_verification_result: ${{ steps.validate-secret.outputs.verification_result }}
setup-trace-id: ${{ steps.setup.outputs.trace-id }}
stale_lock_file_failed: ${{ steps.check-lock-file.outputs.stale_lock_file_failed == 'true' }}
steps:
- name: Checkout actions folder
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
repository: github/gh-aw
sparse-checkout: |
actions
persist-credentials: false
- name: Setup Scripts
id: setup
uses: ./actions/setup
with:
destination: ${{ runner.temp }}/gh-aw/actions
job-name: ${{ github.job }}
trace-id: ${{ needs.pre_activation.outputs.setup-trace-id }}
env:
GH_AW_SETUP_WORKFLOW_NAME: "Issue Monster"
GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/issue-monster.lock.yml@${{ github.ref }}
GH_AW_INFO_VERSION: "1.0.40"
- name: Generate agentic run info
id: generate_aw_info
env:
GH_AW_INFO_ENGINE_ID: "copilot"
GH_AW_INFO_ENGINE_NAME: "GitHub Copilot CLI"
GH_AW_INFO_MODEL: "claude-haiku-4.5"
GH_AW_INFO_VERSION: "1.0.40"
GH_AW_INFO_AGENT_VERSION: "1.0.40"
GH_AW_INFO_WORKFLOW_NAME: "Issue Monster"
GH_AW_INFO_EXPERIMENTAL: "false"
GH_AW_INFO_SUPPORTS_TOOLS_ALLOWLIST: "true"
GH_AW_INFO_STAGED: "false"
GH_AW_INFO_ALLOWED_DOMAINS: '["defaults"]'
GH_AW_INFO_FIREWALL_ENABLED: "true"
GH_AW_INFO_AWF_VERSION: "v0.25.35"
GH_AW_INFO_AWMG_VERSION: ""
GH_AW_INFO_FIREWALL_TYPE: "squid"
GH_AW_COMPILED_STRICT: "true"
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
with:
script: |
const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io, getOctokit);
const { main } = require('${{ runner.temp }}/gh-aw/actions/generate_aw_info.cjs');
await main(core, context);
- name: Validate COPILOT_GITHUB_TOKEN secret
id: validate-secret
run: bash "${RUNNER_TEMP}/gh-aw/actions/validate_multi_secret.sh" COPILOT_GITHUB_TOKEN 'GitHub Copilot CLI' https://github.github.com/gh-aw/reference/engines/#github-copilot-default
env:
COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
- name: Checkout .github and .agents folders
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
sparse-checkout: |
.github
.agents
actions/setup
.claude
.codex
.crush
.gemini
.opencode
.pi
sparse-checkout-cone-mode: true
fetch-depth: 1
- name: Save agent config folders for base branch restoration
env:
GH_AW_AGENT_FOLDERS: ".agents .claude .codex .crush .gemini .github .opencode .pi"
GH_AW_AGENT_FILES: ".crush.json AGENTS.md CLAUDE.md GEMINI.md PI.md opencode.jsonc"
# poutine:ignore untrusted_checkout_exec
run: bash "${RUNNER_TEMP}/gh-aw/actions/save_base_github_folders.sh"
- name: Check workflow lock file
id: check-lock-file
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
env:
GH_AW_WORKFLOW_FILE: "issue-monster.lock.yml"
GH_AW_CONTEXT_WORKFLOW_REF: "${{ github.workflow_ref }}"
with:
script: |
const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io, getOctokit);
const { main } = require('${{ runner.temp }}/gh-aw/actions/check_workflow_timestamp_api.cjs');
await main();
- name: Create prompt with built-in context
env:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
GH_AW_SAFE_OUTPUTS: ${{ runner.temp }}/gh-aw/safeoutputs/outputs.jsonl
GH_AW_GITHUB_ACTOR: ${{ github.actor }}
GH_AW_GITHUB_EVENT_COMMENT_ID: ${{ github.event.comment.id }}
GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: ${{ github.event.discussion.number }}
GH_AW_GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number }}
GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }}
GH_AW_GITHUB_REPOSITORY: ${{ github.repository }}
GH_AW_GITHUB_RUN_ID: ${{ github.run_id }}
GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }}
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_CONTEXT: ${{ needs.pre_activation.outputs.issue_context }}
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_COUNT: ${{ needs.pre_activation.outputs.issue_count }}
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_LIST: ${{ needs.pre_activation.outputs.issue_list }}
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_NUMBERS: ${{ needs.pre_activation.outputs.issue_numbers }}
# poutine:ignore untrusted_checkout_exec
run: |
bash "${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh"
{
cat << 'GH_AW_PROMPT_9e6b53e1f41f1844_EOF'
<system>
GH_AW_PROMPT_9e6b53e1f41f1844_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md"
cat << 'GH_AW_PROMPT_9e6b53e1f41f1844_EOF'
<safe-output-tools>
Tools: add_comment(max:3), assign_to_agent(max:3), missing_tool, missing_data, noop
</safe-output-tools>
GH_AW_PROMPT_9e6b53e1f41f1844_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/mcp_cli_tools_prompt.md"
cat << 'GH_AW_PROMPT_9e6b53e1f41f1844_EOF'
<github-context>
The following GitHub context information is available for this workflow:
{{#if __GH_AW_GITHUB_ACTOR__ }}
- **actor**: __GH_AW_GITHUB_ACTOR__
{{/if}}
{{#if __GH_AW_GITHUB_REPOSITORY__ }}
- **repository**: __GH_AW_GITHUB_REPOSITORY__
{{/if}}
{{#if __GH_AW_GITHUB_WORKSPACE__ }}
- **workspace**: __GH_AW_GITHUB_WORKSPACE__
{{/if}}
{{#if __GH_AW_GITHUB_EVENT_ISSUE_NUMBER__ }}
- **issue-number**: #__GH_AW_GITHUB_EVENT_ISSUE_NUMBER__
{{/if}}
{{#if __GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER__ }}
- **discussion-number**: #__GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER__
{{/if}}
{{#if __GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER__ }}
- **pull-request-number**: #__GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER__
{{/if}}
{{#if __GH_AW_GITHUB_EVENT_COMMENT_ID__ }}
- **comment-id**: __GH_AW_GITHUB_EVENT_COMMENT_ID__
{{/if}}
{{#if __GH_AW_GITHUB_RUN_ID__ }}
- **workflow-run-id**: __GH_AW_GITHUB_RUN_ID__
{{/if}}
</github-context>
GH_AW_PROMPT_9e6b53e1f41f1844_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/cli_proxy_with_safeoutputs_prompt.md"
cat << 'GH_AW_PROMPT_9e6b53e1f41f1844_EOF'
</system>
{{#runtime-import .github/workflows/shared/github-guard-policy.md}}
{{#runtime-import .github/workflows/shared/activation-app.md}}
{{#runtime-import .github/workflows/issue-monster.md}}
GH_AW_PROMPT_9e6b53e1f41f1844_EOF
} > "$GH_AW_PROMPT"
- name: Interpolate variables and render templates
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
env:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
GH_AW_ENGINE_ID: "copilot"
GH_AW_GITHUB_REPOSITORY: ${{ github.repository }}
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_CONTEXT: ${{ needs.pre_activation.outputs.issue_context }}
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_COUNT: ${{ needs.pre_activation.outputs.issue_count }}
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_LIST: ${{ needs.pre_activation.outputs.issue_list }}
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_NUMBERS: ${{ needs.pre_activation.outputs.issue_numbers }}
with:
script: |
const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io, getOctokit);
const { main } = require('${{ runner.temp }}/gh-aw/actions/interpolate_prompt.cjs');
await main();
- name: Substitute placeholders
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
env:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
GH_AW_GITHUB_ACTOR: ${{ github.actor }}
GH_AW_GITHUB_EVENT_COMMENT_ID: ${{ github.event.comment.id }}
GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: ${{ github.event.discussion.number }}
GH_AW_GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number }}
GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }}
GH_AW_GITHUB_REPOSITORY: ${{ github.repository }}
GH_AW_GITHUB_RUN_ID: ${{ github.run_id }}
GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }}
GH_AW_MCP_CLI_SERVERS_LIST: '- `safeoutputs` — run `safeoutputs --help` to see available tools'
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ACTIVATED: ${{ needs.pre_activation.outputs.activated }}
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_CONTEXT: ${{ needs.pre_activation.outputs.issue_context }}
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_COUNT: ${{ needs.pre_activation.outputs.issue_count }}
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_LIST: ${{ needs.pre_activation.outputs.issue_list }}
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_NUMBERS: ${{ needs.pre_activation.outputs.issue_numbers }}
with:
script: |
const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io, getOctokit);
const substitutePlaceholders = require('${{ runner.temp }}/gh-aw/actions/substitute_placeholders.cjs');
// Call the substitution function
return await substitutePlaceholders({
file: process.env.GH_AW_PROMPT,
substitutions: {
GH_AW_GITHUB_ACTOR: process.env.GH_AW_GITHUB_ACTOR,
GH_AW_GITHUB_EVENT_COMMENT_ID: process.env.GH_AW_GITHUB_EVENT_COMMENT_ID,
GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: process.env.GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER,
GH_AW_GITHUB_EVENT_ISSUE_NUMBER: process.env.GH_AW_GITHUB_EVENT_ISSUE_NUMBER,
GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: process.env.GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER,
GH_AW_GITHUB_REPOSITORY: process.env.GH_AW_GITHUB_REPOSITORY,
GH_AW_GITHUB_RUN_ID: process.env.GH_AW_GITHUB_RUN_ID,
GH_AW_GITHUB_WORKSPACE: process.env.GH_AW_GITHUB_WORKSPACE,
GH_AW_MCP_CLI_SERVERS_LIST: process.env.GH_AW_MCP_CLI_SERVERS_LIST,
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ACTIVATED: process.env.GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ACTIVATED,
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_CONTEXT: process.env.GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_CONTEXT,
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_COUNT: process.env.GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_COUNT,
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_LIST: process.env.GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_LIST,
GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_NUMBERS: process.env.GH_AW_NEEDS_PRE_ACTIVATION_OUTPUTS_ISSUE_NUMBERS
}
});
- name: Validate prompt placeholders
env:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
# poutine:ignore untrusted_checkout_exec
run: bash "${RUNNER_TEMP}/gh-aw/actions/validate_prompt_placeholders.sh"
- name: Print prompt
env:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
# poutine:ignore untrusted_checkout_exec
run: bash "${RUNNER_TEMP}/gh-aw/actions/print_prompt_summary.sh"
- name: Upload activation artifact
if: success()
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: activation
include-hidden-files: true
path: |
/tmp/gh-aw/aw_info.json
/tmp/gh-aw/aw-prompts/prompt.txt
/tmp/gh-aw/github_rate_limits.jsonl
/tmp/gh-aw/base
if-no-files-found: ignore
retention-days: 1
agent:
needs: activation
runs-on: ubuntu-latest
permissions:
contents: read
issues: read
pull-requests: read
env:
DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
GH_AW_ASSETS_ALLOWED_EXTS: ""
GH_AW_ASSETS_BRANCH: ""
GH_AW_ASSETS_MAX_SIZE_KB: 0
GH_AW_MCP_LOG_DIR: /tmp/gh-aw/mcp-logs/safeoutputs
GH_AW_WORKFLOW_ID_SANITIZED: issuemonster
outputs:
agentic_engine_timeout: ${{ steps.detect-copilot-errors.outputs.agentic_engine_timeout || 'false' }}
checkout_pr_success: ${{ steps.checkout-pr.outputs.checkout_pr_success || 'true' }}
effective_tokens: ${{ steps.parse-mcp-gateway.outputs.effective_tokens }}
has_patch: ${{ steps.collect_output.outputs.has_patch }}
inference_access_error: ${{ steps.detect-copilot-errors.outputs.inference_access_error || 'false' }}
mcp_policy_error: ${{ steps.detect-copilot-errors.outputs.mcp_policy_error || 'false' }}
model: ${{ needs.activation.outputs.model }}
model_not_supported_error: ${{ steps.detect-copilot-errors.outputs.model_not_supported_error || 'false' }}
output: ${{ steps.collect_output.outputs.output }}
output_types: ${{ steps.collect_output.outputs.output_types }}
setup-trace-id: ${{ steps.setup.outputs.trace-id }}
steps:
- name: Checkout actions folder
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
repository: github/gh-aw
sparse-checkout: |
actions
persist-credentials: false
- name: Setup Scripts
id: setup
uses: ./actions/setup
with:
destination: ${{ runner.temp }}/gh-aw/actions
job-name: ${{ github.job }}
trace-id: ${{ needs.activation.outputs.setup-trace-id }}
env:
GH_AW_SETUP_WORKFLOW_NAME: "Issue Monster"
GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/issue-monster.lock.yml@${{ github.ref }}
GH_AW_INFO_VERSION: "1.0.40"
- name: Set runtime paths
id: set-runtime-paths
run: |
{
echo "GH_AW_SAFE_OUTPUTS=${RUNNER_TEMP}/gh-aw/safeoutputs/outputs.jsonl"
echo "GH_AW_SAFE_OUTPUTS_CONFIG_PATH=${RUNNER_TEMP}/gh-aw/safeoutputs/config.json"
echo "GH_AW_SAFE_OUTPUTS_TOOLS_PATH=${RUNNER_TEMP}/gh-aw/safeoutputs/tools.json"
} >> "$GITHUB_OUTPUT"
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Create gh-aw temp directory
run: bash "${RUNNER_TEMP}/gh-aw/actions/create_gh_aw_tmp_dir.sh"
- name: Configure gh CLI for GitHub Enterprise
run: bash "${RUNNER_TEMP}/gh-aw/actions/configure_gh_for_ghe.sh"
env:
GH_TOKEN: ${{ github.token }}
- name: Configure Git credentials
env:
REPO_NAME: ${{ github.repository }}
SERVER_URL: ${{ github.server_url }}
GITHUB_TOKEN: ${{ github.token }}
run: |
git config --global user.email "github-actions[bot]@users.noreply.github.com"
git config --global user.name "github-actions[bot]"
git config --global am.keepcr true
# Re-authenticate git with GitHub token
SERVER_URL_STRIPPED="${SERVER_URL#https://}"
git remote set-url origin "https://x-access-token:${GITHUB_TOKEN}@${SERVER_URL_STRIPPED}/${REPO_NAME}.git"
echo "Git configured with standard GitHub Actions identity"
      # Only runs for pull-request-shaped events (direct PR payload or an issue
      # event that is actually a PR comment).
      - name: Checkout PR branch
        id: checkout-pr
        if: |
          github.event.pull_request || github.event.issue.pull_request
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        env:
          GH_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
        with:
          github-token: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/checkout_pr_branch.cjs');
            await main();
      - name: Install GitHub Copilot CLI
        run: bash "${RUNNER_TEMP}/gh-aw/actions/install_copilot_cli.sh" 1.0.40
        env:
          GH_HOST: github.com
      - name: Install AWF binary
        run: bash "${RUNNER_TEMP}/gh-aw/actions/install_awf_binary.sh" v0.25.35
      # Parses blocked/trusted user lists and approval labels; repo variables are
      # merged with the workflow's extra labels ("cookie,community").
      - name: Parse integrity filter lists
        id: parse-guard-vars
        env:
          GH_AW_BLOCKED_USERS_VAR: ${{ vars.GH_AW_GITHUB_BLOCKED_USERS || '' }}
          GH_AW_TRUSTED_USERS_VAR: ${{ vars.GH_AW_GITHUB_TRUSTED_USERS || '' }}
          GH_AW_APPROVAL_LABELS_EXTRA: cookie,community
          GH_AW_APPROVAL_LABELS_VAR: ${{ vars.GH_AW_GITHUB_APPROVAL_LABELS || '' }}
        run: bash "${RUNNER_TEMP}/gh-aw/actions/parse_guard_list.sh"
      - name: Download activation artifact
        uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
        with:
          name: activation
          path: /tmp/gh-aw
      # When a PR branch was checked out, restore agent config folders/files from
      # the base branch so a PR cannot tamper with agent instructions.
      - name: Restore agent config folders from base branch
        if: steps.checkout-pr.outcome == 'success'
        env:
          GH_AW_AGENT_FOLDERS: ".agents .claude .codex .crush .gemini .github .opencode .pi"
          GH_AW_AGENT_FILES: ".crush.json AGENTS.md CLAUDE.md GEMINI.md PI.md opencode.jsonc"
        run: bash "${RUNNER_TEMP}/gh-aw/actions/restore_base_github_folders.sh"
      # Pre-pull all pinned container images used by the sandbox and MCP stack.
      - name: Download container images
        run: bash "${RUNNER_TEMP}/gh-aw/actions/download_docker_images.sh" ghcr.io/github/gh-aw-firewall/agent:0.25.35 ghcr.io/github/gh-aw-firewall/api-proxy:0.25.35 ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.35 ghcr.io/github/gh-aw-firewall/squid:0.25.35 ghcr.io/github/gh-aw-mcpg:v0.3.3 ghcr.io/github/github-mcp-server:v1.0.3@sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959 node:lts-alpine@sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f
      # Writes the per-tool quota/target config consumed by the safe-outputs MCP
      # server. The quoted heredoc delimiter prevents any shell expansion inside
      # the JSON; the random suffix makes delimiter collisions impossible.
      - name: Generate Safe Outputs Config
        run: |
          mkdir -p "${RUNNER_TEMP}/gh-aw/safeoutputs"
          mkdir -p /tmp/gh-aw/safeoutputs
          mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
          cat > "${RUNNER_TEMP}/gh-aw/safeoutputs/config.json" << 'GH_AW_SAFE_OUTPUTS_CONFIG_d80e1e7470e0cebd_EOF'
          {"add_comment":{"max":3,"target":"*"},"assign_to_agent":{"allowed":["copilot"],"max":3,"target":"*"},"create_report_incomplete_issue":{},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"},"report_incomplete":{}}
          GH_AW_SAFE_OUTPUTS_CONFIG_d80e1e7470e0cebd_EOF
      # Generates the tools manifest (tools.json) for the safe-outputs MCP server
      # from two JSON env blobs: tool metadata (description suffixes) and
      # per-field validation rules (types, required flags, max lengths,
      # sanitization). Passed via env to avoid template injection in the script.
      - name: Generate Safe Outputs Tools
        env:
          GH_AW_TOOLS_META_JSON: |
            {
              "description_suffixes": {
                "add_comment": " CONSTRAINTS: Maximum 3 comment(s) can be added. Target: *. Supports reply_to_id for discussion threading.",
                "assign_to_agent": " CONSTRAINTS: Maximum 3 issue(s) can be assigned to agent."
              },
              "repo_params": {},
              "dynamic_tools": []
            }
          GH_AW_VALIDATION_JSON: |
            {
              "add_comment": {
                "defaultMax": 1,
                "fields": {
                  "body": {
                    "required": true,
                    "type": "string",
                    "sanitize": true,
                    "maxLength": 65000
                  },
                  "item_number": {
                    "issueOrPRNumber": true
                  },
                  "reply_to_id": {
                    "type": "string",
                    "maxLength": 256
                  },
                  "repo": {
                    "type": "string",
                    "maxLength": 256
                  }
                }
              },
              "assign_to_agent": {
                "defaultMax": 1,
                "fields": {
                  "agent": {
                    "type": "string",
                    "sanitize": true,
                    "maxLength": 128
                  },
                  "issue_number": {
                    "issueNumberOrTemporaryId": true
                  },
                  "pull_number": {
                    "optionalPositiveInteger": true
                  },
                  "pull_request_repo": {
                    "type": "string",
                    "maxLength": 256
                  },
                  "repo": {
                    "type": "string",
                    "maxLength": 256
                  }
                },
                "customValidation": "requiresOneOf:issue_number,pull_number"
              },
              "missing_data": {
                "defaultMax": 20,
                "fields": {
                  "alternatives": {
                    "type": "string",
                    "sanitize": true,
                    "maxLength": 256
                  },
                  "context": {
                    "type": "string",
                    "sanitize": true,
                    "maxLength": 256
                  },
                  "data_type": {
                    "type": "string",
                    "sanitize": true,
                    "maxLength": 128
                  },
                  "reason": {
                    "type": "string",
                    "sanitize": true,
                    "maxLength": 256
                  }
                }
              },
              "missing_tool": {
                "defaultMax": 20,
                "fields": {
                  "alternatives": {
                    "type": "string",
                    "sanitize": true,
                    "maxLength": 512
                  },
                  "reason": {
                    "required": true,
                    "type": "string",
                    "sanitize": true,
                    "maxLength": 256
                  },
                  "tool": {
                    "type": "string",
                    "sanitize": true,
                    "maxLength": 128
                  }
                }
              },
              "noop": {
                "defaultMax": 1,
                "fields": {
                  "message": {
                    "required": true,
                    "type": "string",
                    "sanitize": true,
                    "maxLength": 65000
                  }
                }
              },
              "report_incomplete": {
                "defaultMax": 5,
                "fields": {
                  "details": {
                    "type": "string",
                    "sanitize": true,
                    "maxLength": 65000
                  },
                  "reason": {
                    "required": true,
                    "type": "string",
                    "sanitize": true,
                    "maxLength": 1024
                  }
                }
              }
            }
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/generate_safe_outputs_tools.cjs');
            await main();
      # Generates a random API key (masked before any later use) and a fixed
      # port, then starts the safe-outputs MCP HTTP server with them.
      - name: Generate Safe Outputs MCP Server Config
        id: safe-outputs-config
        run: |
          # Generate a secure random API key (360 bits of entropy, 40+ chars)
          # Mask immediately to prevent timing vulnerabilities
          API_KEY=$(openssl rand -base64 45 | tr -d '/+=')
          echo "::add-mask::${API_KEY}"
          PORT=3001
          # Set outputs for next steps
          {
            echo "safe_outputs_api_key=${API_KEY}"
            echo "safe_outputs_port=${PORT}"
          } >> "$GITHUB_OUTPUT"
          echo "Safe Outputs MCP server will run on port ${PORT}"
      - name: Start Safe Outputs MCP HTTP Server
        id: safe-outputs-start
        env:
          DEBUG: '*'
          GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }}
          GH_AW_SAFE_OUTPUTS_PORT: ${{ steps.safe-outputs-config.outputs.safe_outputs_port }}
          GH_AW_SAFE_OUTPUTS_API_KEY: ${{ steps.safe-outputs-config.outputs.safe_outputs_api_key }}
          GH_AW_SAFE_OUTPUTS_TOOLS_PATH: ${{ runner.temp }}/gh-aw/safeoutputs/tools.json
          GH_AW_SAFE_OUTPUTS_CONFIG_PATH: ${{ runner.temp }}/gh-aw/safeoutputs/config.json
          GH_AW_MCP_LOG_DIR: /tmp/gh-aw/mcp-logs/safeoutputs
        run: |
          # Environment variables are set above to prevent template injection
          export DEBUG
          export GH_AW_SAFE_OUTPUTS
          export GH_AW_SAFE_OUTPUTS_PORT
          export GH_AW_SAFE_OUTPUTS_API_KEY
          export GH_AW_SAFE_OUTPUTS_TOOLS_PATH
          export GH_AW_SAFE_OUTPUTS_CONFIG_PATH
          export GH_AW_MCP_LOG_DIR
          bash "${RUNNER_TEMP}/gh-aw/actions/start_safe_outputs_server.sh"
      # Boots the MCP gateway container that proxies MCP traffic between the
      # sandboxed agent and the safe-outputs server.
      - name: Start MCP Gateway
        id: start-mcp-gateway
        env:
          GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }}
          # NOTE(review): these read api_key/port from the safe-outputs-start
          # step, while the values were generated by safe-outputs-config as
          # safe_outputs_api_key/safe_outputs_port — presumably
          # start_safe_outputs_server.sh re-publishes them under these names;
          # verify against that script.
          GH_AW_SAFE_OUTPUTS_API_KEY: ${{ steps.safe-outputs-start.outputs.api_key }}
          GH_AW_SAFE_OUTPUTS_PORT: ${{ steps.safe-outputs-start.outputs.port }}
        run: |
          set -eo pipefail
          mkdir -p "${RUNNER_TEMP}/gh-aw/mcp-config"
          # Export gateway environment variables for MCP config and gateway script
          export MCP_GATEWAY_PORT="8080"
          export MCP_GATEWAY_DOMAIN="host.docker.internal"
          export MCP_GATEWAY_HOST_DOMAIN="localhost"
          MCP_GATEWAY_API_KEY=$(openssl rand -base64 45 | tr -d '/+=')
          echo "::add-mask::${MCP_GATEWAY_API_KEY}"
          export MCP_GATEWAY_API_KEY
          export MCP_GATEWAY_PAYLOAD_DIR="/tmp/gh-aw/mcp-payloads"
          mkdir -p "${MCP_GATEWAY_PAYLOAD_DIR}"
          export MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD="524288"
          export DEBUG="*"
          export GH_AW_ENGINE="copilot"
          export GH_AW_MCP_CLI_SERVERS='["safeoutputs"]'
          echo 'GH_AW_MCP_CLI_SERVERS=["safeoutputs"]' >> "$GITHUB_ENV"
          MCP_GATEWAY_UID=$(id -u 2>/dev/null || echo '0')
          MCP_GATEWAY_GID=$(id -g 2>/dev/null || echo '0')
          DOCKER_SOCK_GID=$(stat -c '%g' /var/run/docker.sock 2>/dev/null || echo '0')
          export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host --add-host host.docker.internal:127.0.0.1 --user '"${MCP_GATEWAY_UID}"':'"${MCP_GATEWAY_GID}"' --group-add '"${DOCKER_SOCK_GID}"' -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.3.3'
          mkdir -p /home/runner/.copilot
          GH_AW_NODE=$(which node 2>/dev/null || command -v node 2>/dev/null || echo node)
          # Unquoted heredoc delimiter: $VARS below expand now; the escaped
          # \${GH_AW_SAFE_OUTPUTS_API_KEY} stays literal for later resolution.
          cat << GH_AW_MCP_CONFIG_cbb44fb6768786df_EOF | "$GH_AW_NODE" "${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.cjs"
          {
            "mcpServers": {
              "safeoutputs": {
                "type": "http",
                "url": "http://host.docker.internal:$GH_AW_SAFE_OUTPUTS_PORT",
                "headers": {
                  "Authorization": "\${GH_AW_SAFE_OUTPUTS_API_KEY}"
                },
                "guard-policies": {
                  "write-sink": {
                    "accept": [
                      "*"
                    ]
                  }
                }
              }
            },
            "gateway": {
              "port": $MCP_GATEWAY_PORT,
              "domain": "${MCP_GATEWAY_DOMAIN}",
              "apiKey": "${MCP_GATEWAY_API_KEY}",
              "payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}"
            }
          }
          GH_AW_MCP_CONFIG_cbb44fb6768786df_EOF
      # Exposes gateway-backed MCP servers as CLI shims for the agent;
      # best-effort (continue-on-error), the agent can still use HTTP MCP.
      - name: Mount MCP servers as CLIs
        id: mount-mcp-clis
        continue-on-error: true
        env:
          MCP_GATEWAY_API_KEY: ${{ steps.start-mcp-gateway.outputs.gateway-api-key }}
          MCP_GATEWAY_DOMAIN: ${{ steps.start-mcp-gateway.outputs.gateway-domain }}
          MCP_GATEWAY_PORT: ${{ steps.start-mcp-gateway.outputs.gateway-port }}
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/mount_mcp_as_cli.cjs');
            await main();
      # Scrub the token-bearing git remote set earlier, so credentials are not
      # visible to the agent process.
      - name: Clean credentials
        continue-on-error: true
        run: bash "${RUNNER_TEMP}/gh-aw/actions/clean_git_credentials.sh"
      # Snapshot of the workspace before the agent runs, for later diffing.
      - name: Audit pre-agent workspace
        id: pre_agent_audit
        continue-on-error: true
        run: bash "${RUNNER_TEMP}/gh-aw/actions/audit_pre_agent_workspace.sh"
      # Proxy for gh/git CLI calls; policy restricts writes to approved repos.
      - name: Start CLI Proxy
        env:
          GH_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          CLI_PROXY_POLICY: '{"allow-only":{"min-integrity":"approved","repos":"all"}}'
          CLI_PROXY_IMAGE: 'ghcr.io/github/gh-aw-mcpg:v0.3.3'
        run: |
          bash "${RUNNER_TEMP}/gh-aw/actions/start_cli_proxy.sh"
      # Runs the Copilot CLI agent inside the AWF network-firewall sandbox:
      # writes the firewall config (domain allowlist), then launches awf with
      # read-only mounts of the actions dir, secrets excluded from the
      # environment, and host access limited to ports 80/443/8080.
      - name: Execute GitHub Copilot CLI
        id: agentic_execution
        # Copilot CLI tool arguments (sorted):
        timeout-minutes: 30
        run: |
          set -o pipefail
          touch /tmp/gh-aw/agent-step-summary.md
          GH_AW_NODE_BIN=$(command -v node 2>/dev/null || true)
          export GH_AW_NODE_BIN
          (umask 177 && touch /tmp/gh-aw/agent-stdio.log)
          printf '%s\n' '{"$schema":"https://github.com/github/gh-aw-firewall/releases/download/v0.25.35/awf-config.schema.json","network":{"allowDomains":["api.business.githubcopilot.com","api.enterprise.githubcopilot.com","api.github.com","api.githubcopilot.com","api.individual.githubcopilot.com","api.snapcraft.io","archive.ubuntu.com","azure.archive.ubuntu.com","crl.geotrust.com","crl.globalsign.com","crl.identrust.com","crl.sectigo.com","crl.thawte.com","crl.usertrust.com","crl.verisign.com","crl3.digicert.com","crl4.digicert.com","crls.ssl.com","github.com","host.docker.internal","json-schema.org","json.schemastore.org","keyserver.ubuntu.com","ocsp.digicert.com","ocsp.geotrust.com","ocsp.globalsign.com","ocsp.identrust.com","ocsp.sectigo.com","ocsp.ssl.com","ocsp.thawte.com","ocsp.usertrust.com","ocsp.verisign.com","packagecloud.io","packages.cloud.google.com","packages.microsoft.com","ppa.launchpad.net","raw.githubusercontent.com","registry.npmjs.org","s.symcb.com","s.symcd.com","security.ubuntu.com","telemetry.enterprise.githubcopilot.com","ts-crl.ws.symantec.com","ts-ocsp.ws.symantec.com","www.googleapis.com"]},"apiProxy":{"enabled":true},"container":{"imageTag":"0.25.35"}}' > "${RUNNER_TEMP}/gh-aw/awf-config.json" && cp "${RUNNER_TEMP}/gh-aw/awf-config.json" /tmp/gh-aw/awf-config.json
          # shellcheck disable=SC1003
          sudo -E awf --config "${RUNNER_TEMP}/gh-aw/awf-config.json" --container-workdir "${GITHUB_WORKSPACE}" --mount "${RUNNER_TEMP}/gh-aw:${RUNNER_TEMP}/gh-aw:ro" --mount "${RUNNER_TEMP}/gh-aw:/host${RUNNER_TEMP}/gh-aw:ro" --env-all --exclude-env COPILOT_GITHUB_TOKEN --exclude-env GH_TOKEN --exclude-env GITHUB_MCP_SERVER_TOKEN --exclude-env MCP_GATEWAY_API_KEY --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --audit-dir /tmp/gh-aw/sandbox/firewall/audit --enable-host-access --allow-host-ports 80,443,8080 --skip-pull --difc-proxy-host host.docker.internal:18443 --difc-proxy-ca-cert /tmp/gh-aw/difc-proxy-tls/ca.crt \
            -- /bin/bash -c 'export PATH="${RUNNER_TEMP}/gh-aw/mcp-cli/bin:$PATH" && export PATH="$(find /opt/hostedtoolcache /home/runner/work/_tool -maxdepth 4 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && GH_AW_NODE_EXEC="${GH_AW_NODE_BIN:-}"; if [ -z "$GH_AW_NODE_EXEC" ] || [ ! -x "$GH_AW_NODE_EXEC" ]; then GH_AW_NODE_EXEC="$(command -v node 2>/dev/null || echo node)"; fi; "$GH_AW_NODE_EXEC" ${RUNNER_TEMP}/gh-aw/actions/copilot_harness.cjs /usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --no-ask-user --allow-all-tools --allow-all-paths --add-dir "${GITHUB_WORKSPACE}" --prompt-file /tmp/gh-aw/aw-prompts/prompt.txt' 2>&1 | tee -a /tmp/gh-aw/agent-stdio.log
        env:
          COPILOT_AGENT_RUNNER_TYPE: STANDALONE
          COPILOT_API_KEY: dummy-byok-key-for-offline-mode
          COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
          COPILOT_MODEL: claude-haiku-4.5
          GH_AW_MCP_CONFIG: /home/runner/.copilot/mcp-config.json
          GH_AW_PHASE: agent
          GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
          GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }}
          GH_AW_VERSION: dev
          GH_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN || github.token }}
          GITHUB_API_URL: ${{ github.api_url }}
          GITHUB_AW: true
          GITHUB_COPILOT_INTEGRATION_ID: agentic-workflows
          GITHUB_HEAD_REF: ${{ github.head_ref }}
          GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          GITHUB_REF_NAME: ${{ github.ref_name }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_STEP_SUMMARY: /tmp/gh-aw/agent-step-summary.md
          GITHUB_WORKSPACE: ${{ github.workspace }}
          GIT_AUTHOR_EMAIL: github-actions[bot]@users.noreply.github.com
          GIT_AUTHOR_NAME: github-actions[bot]
          GIT_COMMITTER_EMAIL: github-actions[bot]@users.noreply.github.com
          GIT_COMMITTER_NAME: github-actions[bot]
          XDG_CONFIG_HOME: /home/runner
      # Teardown and log-processing steps; all run even if the agent failed
      # (if: always()) and never fail the job (continue-on-error).
      - name: Stop CLI Proxy
        if: always()
        continue-on-error: true
        run: bash "${RUNNER_TEMP}/gh-aw/actions/stop_cli_proxy.sh"
      - name: Detect Copilot errors
        id: detect-copilot-errors
        if: always()
        continue-on-error: true
        run: node "${RUNNER_TEMP}/gh-aw/actions/detect_copilot_errors.cjs"
      # Re-runs the earlier git-credential setup: the pre-agent "Clean
      # credentials" step removed the token, so post-agent steps re-authenticate.
      - name: Configure Git credentials
        env:
          REPO_NAME: ${{ github.repository }}
          SERVER_URL: ${{ github.server_url }}
          GITHUB_TOKEN: ${{ github.token }}
        run: |
          git config --global user.email "github-actions[bot]@users.noreply.github.com"
          git config --global user.name "github-actions[bot]"
          git config --global am.keepcr true
          # Re-authenticate git with GitHub token
          SERVER_URL_STRIPPED="${SERVER_URL#https://}"
          git remote set-url origin "https://x-access-token:${GITHUB_TOKEN}@${SERVER_URL_STRIPPED}/${REPO_NAME}.git"
          echo "Git configured with standard GitHub Actions identity"
      - name: Copy Copilot session state files to logs
        if: always()
        continue-on-error: true
        run: bash "${RUNNER_TEMP}/gh-aw/actions/copy_copilot_session_state.sh"
      - name: Stop MCP Gateway
        if: always()
        continue-on-error: true
        env:
          MCP_GATEWAY_PORT: ${{ steps.start-mcp-gateway.outputs.gateway-port }}
          MCP_GATEWAY_API_KEY: ${{ steps.start-mcp-gateway.outputs.gateway-api-key }}
          GATEWAY_PID: ${{ steps.start-mcp-gateway.outputs.gateway-pid }}
        run: |
          bash "${RUNNER_TEMP}/gh-aw/actions/stop_mcp_gateway.sh" "$GATEWAY_PID"
      # Scrubs every listed secret value from log files before artifact upload.
      - name: Redact secrets in logs
        if: always()
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/redact_secrets.cjs');
            await main();
        env:
          GH_AW_SECRET_NAMES: 'COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN'
          SECRET_COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
          SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }}
          SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }}
          SECRET_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Append agent step summary
        if: always()
        run: bash "${RUNNER_TEMP}/gh-aw/actions/append_agent_step_summary.sh"
      # Best-effort copy of the agent's safe-output JSONL into /tmp/gh-aw so it
      # is included in the uploaded artifact even if ingestion fails.
      - name: Copy Safe Outputs
        if: always()
        env:
          GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }}
        run: |
          mkdir -p /tmp/gh-aw
          cp "$GH_AW_SAFE_OUTPUTS" /tmp/gh-aw/safeoutputs.jsonl 2>/dev/null || true
      # Validates/sanitizes the agent's NDJSON output (URLs checked against the
      # same domain allowlist used by the firewall) into agent_output.json.
      - name: Ingest agent output
        id: collect_output
        if: always()
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        env:
          GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }}
          GH_AW_ALLOWED_DOMAINS: "api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.githubcopilot.com,api.individual.githubcopilot.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,github.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,telemetry.enterprise.githubcopilot.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com,www.googleapis.com"
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_API_URL: ${{ github.api_url }}
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/collect_ndjson_output.cjs');
            await main();
      - name: Parse agent logs for step summary
        if: always()
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        env:
          GH_AW_AGENT_OUTPUT: /tmp/gh-aw/sandbox/agent/logs/
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/parse_copilot_log.cjs');
            await main();
      - name: Parse MCP Gateway logs for step summary
        if: always()
        id: parse-mcp-gateway
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/parse_mcp_gateway_log.cjs');
            await main();
      - name: Print firewall logs
        if: always()
        continue-on-error: true
        env:
          AWF_LOGS_DIR: /tmp/gh-aw/sandbox/firewall/logs
        run: |
          # Fix permissions on firewall logs/audit dirs so they can be uploaded as artifacts
          # AWF runs with sudo, creating files owned by root
          sudo chmod -R a+r /tmp/gh-aw/sandbox/firewall 2>/dev/null || true
          # Only run awf logs summary if awf command exists (it may not be installed if workflow failed before install step)
          if command -v awf &> /dev/null; then
            awf logs summary | tee -a "$GITHUB_STEP_SUMMARY"
          else
            echo 'AWF binary not installed, skipping firewall log summary'
          fi
      - name: Parse token usage for step summary
        if: always()
        continue-on-error: true
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/parse_token_usage.cjs');
            await main();
      - name: Print AWF reflect summary
        if: always()
        continue-on-error: true
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/awf_reflect_summary.cjs');
            await main();
      # Guarantees downstream jobs always find a (possibly empty) output file.
      - name: Write agent output placeholder if missing
        if: always()
        run: |
          if [ ! -f /tmp/gh-aw/agent_output.json ]; then
            echo '{"items":[]}' > /tmp/gh-aw/agent_output.json
          fi
      # Uploads all agent artifacts; the "!" entry excludes proxy TLS material.
      - name: Upload agent artifacts
        if: always()
        continue-on-error: true
        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
        with:
          name: agent
          path: |
            /tmp/gh-aw/aw-prompts/prompt.txt
            /tmp/gh-aw/sandbox/agent/logs/
            /tmp/gh-aw/redacted-urls.log
            /tmp/gh-aw/mcp-logs/
            /tmp/gh-aw/proxy-logs/
            !/tmp/gh-aw/proxy-logs/proxy-tls/
            /tmp/gh-aw/agent_usage.json
            /tmp/gh-aw/agent-stdio.log
            /tmp/gh-aw/pre-agent-audit.txt
            /tmp/gh-aw/agent/
            /tmp/gh-aw/github_rate_limits.jsonl
            /tmp/gh-aw/safeoutputs.jsonl
            /tmp/gh-aw/agent_output.json
            /tmp/gh-aw/aw-*.patch
            /tmp/gh-aw/aw-*.bundle
            /tmp/gh-aw/awf-config.json
            /tmp/gh-aw/sandbox/firewall/logs/
            /tmp/gh-aw/sandbox/firewall/audit/
            /tmp/gh-aw/sandbox/firewall/awf-reflect.json
          if-no-files-found: ignore
  # Post-run reporting job: processes the agent's safe-output items (noop
  # messages, missing-tool/incomplete reports) and files failure issues. Runs
  # whenever the agent ran, or when the activation job's lockdown or stale-lock
  # checks failed.
  conclusion:
    needs:
      - activation
      - agent
      - detection
    if: >
      always() && (needs.agent.result != 'skipped' || needs.activation.outputs.lockdown_check_failed == 'true' ||
      needs.activation.outputs.stale_lock_file_failed == 'true')
    runs-on: ubuntu-slim
    permissions:
      contents: read
      discussions: write
      issues: write
      pull-requests: write
    # Serialize conclusion runs per workflow; never cancel an in-flight report.
    concurrency:
      group: "gh-aw-conclusion-issue-monster"
      cancel-in-progress: false
    outputs:
      incomplete_count: ${{ steps.report_incomplete.outputs.incomplete_count }}
      noop_message: ${{ steps.noop.outputs.noop_message }}
      tools_reported: ${{ steps.missing_tool.outputs.tools_reported }}
      total_count: ${{ steps.missing_tool.outputs.total_count }}
    steps:
      # Fetch only the gh-aw actions folder (sparse checkout) from github/gh-aw.
      - name: Checkout actions folder
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          repository: github/gh-aw
          sparse-checkout: |
            actions
          persist-credentials: false
      - name: Setup Scripts
        id: setup
        uses: ./actions/setup
        with:
          destination: ${{ runner.temp }}/gh-aw/actions
          job-name: ${{ github.job }}
          trace-id: ${{ needs.activation.outputs.setup-trace-id }}
        env:
          GH_AW_SETUP_WORKFLOW_NAME: "Issue Monster"
          GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/issue-monster.lock.yml@${{ github.ref }}
          GH_AW_INFO_VERSION: "1.0.40"
      # Best-effort download: the agent job may have failed before uploading.
      - name: Download agent output artifact
        id: download-agent-output
        continue-on-error: true
        uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
        with:
          name: agent
          path: /tmp/gh-aw/
      # Publishes the output path only when the download actually succeeded;
      # later steps see an empty GH_AW_AGENT_OUTPUT otherwise.
      - name: Setup agent output environment variable
        id: setup-agent-output-env
        if: steps.download-agent-output.outcome == 'success'
        run: |
          mkdir -p /tmp/gh-aw/
          find "/tmp/gh-aw/" -type f -print
          echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/agent_output.json" >> "$GITHUB_OUTPUT"
      - name: Process no-op messages
        id: noop
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        env:
          GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
          GH_AW_NOOP_MAX: "1"
          GH_AW_WORKFLOW_NAME: "Issue Monster"
          GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
          GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
          GH_AW_NOOP_REPORT_AS_ISSUE: "true"
        with:
          github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/handle_noop_message.cjs');
            await main();
      - name: Log detection run
        id: detection_runs
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        env:
          GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
          GH_AW_WORKFLOW_NAME: "Issue Monster"
          GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
          GH_AW_DETECTION_CONCLUSION: ${{ needs.detection.outputs.detection_conclusion }}
          GH_AW_DETECTION_REASON: ${{ needs.detection.outputs.detection_reason }}
        with:
          github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/handle_detection_runs.cjs');
            await main();
      - name: Record missing tool
        id: missing_tool
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        env:
          GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
          GH_AW_MISSING_TOOL_CREATE_ISSUE: "true"
          GH_AW_WORKFLOW_NAME: "Issue Monster"
        with:
          github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/missing_tool.cjs');
            await main();
      - name: Record incomplete
        id: report_incomplete
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        env:
          GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
          GH_AW_REPORT_INCOMPLETE_CREATE_ISSUE: "true"
          GH_AW_WORKFLOW_NAME: "Issue Monster"
        with:
          github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/report_incomplete_handler.cjs');
            await main();
      # Aggregates every failure signal (agent result, checkout/PR errors,
      # inference/MCP/timeout flags, assignment errors, lockdown checks) and
      # posts the themed status messages defined in GH_AW_SAFE_OUTPUT_MESSAGES.
      - name: Handle agent failure
        id: handle_agent_failure
        if: always()
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        env:
          GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
          GH_AW_WORKFLOW_NAME: "Issue Monster"
          GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
          GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
          GH_AW_WORKFLOW_ID: "issue-monster"
          GH_AW_ACTION_FAILURE_ISSUE_EXPIRES_HOURS: "12"
          GH_AW_ENGINE_ID: "copilot"
          GH_AW_SECRET_VERIFICATION_RESULT: ${{ needs.activation.outputs.secret_verification_result }}
          GH_AW_CHECKOUT_PR_SUCCESS: ${{ needs.agent.outputs.checkout_pr_success }}
          GH_AW_INFERENCE_ACCESS_ERROR: ${{ needs.agent.outputs.inference_access_error }}
          GH_AW_MCP_POLICY_ERROR: ${{ needs.agent.outputs.mcp_policy_error }}
          GH_AW_AGENTIC_ENGINE_TIMEOUT: ${{ needs.agent.outputs.agentic_engine_timeout }}
          GH_AW_MODEL_NOT_SUPPORTED_ERROR: ${{ needs.agent.outputs.model_not_supported_error }}
          GH_AW_ENGINE_API_HOSTS: "api.enterprise.githubcopilot.com,api.githubcopilot.com,api.business.githubcopilot.com,api.individual.githubcopilot.com"
          GH_AW_ASSIGNMENT_ERRORS: ${{ needs.safe_outputs.outputs.assign_to_agent_assignment_errors }}
          GH_AW_ASSIGNMENT_ERROR_COUNT: ${{ needs.safe_outputs.outputs.assign_to_agent_assignment_error_count }}
          GH_AW_LOCKDOWN_CHECK_FAILED: ${{ needs.activation.outputs.lockdown_check_failed }}
          GH_AW_STALE_LOCK_FILE_FAILED: ${{ needs.activation.outputs.stale_lock_file_failed }}
          GH_AW_SAFE_OUTPUT_MESSAGES: "{\"footer\":\"\\u003e 🍪 *Om nom nom by [{workflow_name}]({run_url})*{effective_tokens_suffix}{history_link}\",\"runStarted\":\"🍪 ISSUE! ISSUE! [{workflow_name}]({run_url}) hungry for issues on this {event_type}! Om nom nom...\",\"runSuccess\":\"🍪 YUMMY! [{workflow_name}]({run_url}) ate the issues! That was DELICIOUS! Me want MORE! 😋\",\"runFailure\":\"🍪 Aww... [{workflow_name}]({run_url}) {status}. No cookie for monster today... 😢\"}"
          GH_AW_GROUP_REPORTS: "false"
          GH_AW_FAILURE_REPORT_AS_ISSUE: "true"
          GH_AW_MISSING_TOOL_REPORT_AS_FAILURE: "true"
          GH_AW_MISSING_DATA_REPORT_AS_FAILURE: "true"
          GH_AW_TIMEOUT_MINUTES: "30"
        with:
          github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
          script: |
            const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
            setupGlobals(core, github, context, exec, io, getOctokit);
            const { main } = require('${{ runner.temp }}/gh-aw/actions/handle_agent_failure.cjs');
            await main();
detection:
needs:
- activation
- agent
if: >
always() && needs.agent.result != 'skipped' && (needs.agent.outputs.output_types != '' || needs.agent.outputs.has_patch == 'true')
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
detection_conclusion: ${{ steps.detection_conclusion.outputs.conclusion }}
detection_reason: ${{ steps.detection_conclusion.outputs.reason }}
detection_success: ${{ steps.detection_conclusion.outputs.success }}
steps:
- name: Checkout actions folder
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
repository: github/gh-aw
sparse-checkout: |
actions
persist-credentials: false
- name: Setup Scripts
id: setup
uses: ./actions/setup
with:
destination: ${{ runner.temp }}/gh-aw/actions
job-name: ${{ github.job }}
trace-id: ${{ needs.activation.outputs.setup-trace-id }}
env:
GH_AW_SETUP_WORKFLOW_NAME: "Issue Monster"
GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/issue-monster.lock.yml@${{ github.ref }}
GH_AW_INFO_VERSION: "1.0.40"
- name: Download agent output artifact
id: download-agent-output
continue-on-error: true
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
name: agent
path: /tmp/gh-aw/
- name: Setup agent output environment variable
id: setup-agent-output-env
if: steps.download-agent-output.outcome == 'success'
run: |
mkdir -p /tmp/gh-aw/
find "/tmp/gh-aw/" -type f -print
echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/agent_output.json" >> "$GITHUB_OUTPUT"
- name: Checkout repository for patch context
if: needs.agent.outputs.has_patch == 'true'
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
# --- Threat Detection ---
- name: Clean stale firewall files from agent artifact
run: |
rm -rf /tmp/gh-aw/sandbox/firewall/logs
rm -rf /tmp/gh-aw/sandbox/firewall/audit
- name: Download container images
run: bash "${RUNNER_TEMP}/gh-aw/actions/download_docker_images.sh" ghcr.io/github/gh-aw-firewall/agent:0.25.35 ghcr.io/github/gh-aw-firewall/api-proxy:0.25.35 ghcr.io/github/gh-aw-firewall/squid:0.25.35
# Guard: threat detection only runs when the agent job actually produced
# something to analyze — safe outputs (output_types non-empty) or a patch.
- name: Check if detection needed
id: detection_guard
if: always()
env:
OUTPUT_TYPES: ${{ needs.agent.outputs.output_types }}
HAS_PATCH: ${{ needs.agent.outputs.has_patch }}
run: |
if [[ -n "$OUTPUT_TYPES" || "$HAS_PATCH" == "true" ]]; then
echo "run_detection=true" >> "$GITHUB_OUTPUT"
echo "Detection will run: output_types=$OUTPUT_TYPES, has_patch=$HAS_PATCH"
else
echo "run_detection=false" >> "$GITHUB_OUTPUT"
echo "Detection skipped: no agent outputs or patches to analyze"
fi
# Remove MCP server configuration left over from the agent phase so the
# detection pass runs without any MCP tooling (all rm -f, so missing files
# are not an error).
- name: Clear MCP Config for detection
if: always() && steps.detection_guard.outputs.run_detection == 'true'
run: |
rm -f "${RUNNER_TEMP}/gh-aw/mcp-config/mcp-servers.json"
rm -f /home/runner/.copilot/mcp-config.json
rm -f "$GITHUB_WORKSPACE/.gemini/settings.json"
# Stage the agent's prompt, output JSON, and any aw-*.patch / aw-*.bundle
# artifacts under /tmp/gh-aw/threat-detection; every copy is best-effort
# (2>/dev/null || true) since some artifacts may not exist for this run.
- name: Prepare threat detection files
if: always() && steps.detection_guard.outputs.run_detection == 'true'
run: |
mkdir -p /tmp/gh-aw/threat-detection/aw-prompts
cp /tmp/gh-aw/aw-prompts/prompt.txt /tmp/gh-aw/threat-detection/aw-prompts/prompt.txt 2>/dev/null || true
cp /tmp/gh-aw/agent_output.json /tmp/gh-aw/threat-detection/agent_output.json 2>/dev/null || true
for f in /tmp/gh-aw/aw-*.patch; do
[ -f "$f" ] && cp "$f" /tmp/gh-aw/threat-detection/ 2>/dev/null || true
done
for f in /tmp/gh-aw/aw-*.bundle; do
[ -f "$f" ] && cp "$f" /tmp/gh-aw/threat-detection/ 2>/dev/null || true
done
echo "Prepared threat detection files:"
ls -la /tmp/gh-aw/threat-detection/ 2>/dev/null || true
# Generate the detection prompt via the shared setup_threat_detection script.
- name: Setup threat detection
if: always() && steps.detection_guard.outputs.run_detection == 'true'
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
env:
WORKFLOW_NAME: "Issue Monster"
WORKFLOW_DESCRIPTION: "The Cookie Monster of issues - assigns issues to Copilot coding agent one at a time"
HAS_PATCH: ${{ needs.agent.outputs.has_patch }}
with:
script: |
const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io, getOctokit);
const { main } = require('${{ runner.temp }}/gh-aw/actions/setup_threat_detection.cjs');
await main();
# Pre-create the log file so the later tee/upload steps never fail on a
# missing path even if the detection agent exits early.
- name: Ensure threat-detection directory and log
if: always() && steps.detection_guard.outputs.run_detection == 'true'
run: |
mkdir -p /tmp/gh-aw/threat-detection
touch /tmp/gh-aw/threat-detection/detection.log
- name: Setup Node.js
uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0
with:
node-version: '24'
package-manager-cache: false
- name: Install GitHub Copilot CLI
run: bash "${RUNNER_TEMP}/gh-aw/actions/install_copilot_cli.sh" 1.0.40
env:
GH_HOST: github.com
- name: Install AWF binary
run: bash "${RUNNER_TEMP}/gh-aw/actions/install_awf_binary.sh" v0.25.35
# Run the Copilot CLI inside the awf network-firewall sandbox against the
# generated detection prompt; all output is tee'd into detection.log.
# continue-on-error keeps the job alive so the conclusion step below can
# still parse (and report) a failed detection run.
- name: Execute GitHub Copilot CLI
if: always() && steps.detection_guard.outputs.run_detection == 'true'
continue-on-error: true
id: detection_agentic_execution
# Copilot CLI tool arguments (sorted):
timeout-minutes: 20
run: |
set -o pipefail
touch /tmp/gh-aw/agent-step-summary.md
GH_AW_NODE_BIN=$(command -v node 2>/dev/null || true)
export GH_AW_NODE_BIN
(umask 177 && touch /tmp/gh-aw/threat-detection/detection.log)
printf '%s\n' '{"$schema":"https://github.com/github/gh-aw-firewall/releases/download/v0.25.35/awf-config.schema.json","network":{"allowDomains":["api.business.githubcopilot.com","api.enterprise.githubcopilot.com","api.github.com","api.githubcopilot.com","api.individual.githubcopilot.com","github.com","host.docker.internal","telemetry.enterprise.githubcopilot.com"]},"apiProxy":{"enabled":true},"container":{"imageTag":"0.25.35"}}' > "${RUNNER_TEMP}/gh-aw/awf-config.json" && cp "${RUNNER_TEMP}/gh-aw/awf-config.json" /tmp/gh-aw/awf-config.json
# shellcheck disable=SC1003
sudo -E awf --config "${RUNNER_TEMP}/gh-aw/awf-config.json" --container-workdir "${GITHUB_WORKSPACE}" --mount "${RUNNER_TEMP}/gh-aw:${RUNNER_TEMP}/gh-aw:ro" --mount "${RUNNER_TEMP}/gh-aw:/host${RUNNER_TEMP}/gh-aw:ro" --env-all --exclude-env COPILOT_GITHUB_TOKEN --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --audit-dir /tmp/gh-aw/sandbox/firewall/audit --enable-host-access --allow-host-ports 80,443,8080 --skip-pull \
-- /bin/bash -c 'export PATH="$(find /opt/hostedtoolcache /home/runner/work/_tool -maxdepth 4 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && GH_AW_NODE_EXEC="${GH_AW_NODE_BIN:-}"; if [ -z "$GH_AW_NODE_EXEC" ] || [ ! -x "$GH_AW_NODE_EXEC" ]; then GH_AW_NODE_EXEC="$(command -v node 2>/dev/null || echo node)"; fi; "$GH_AW_NODE_EXEC" ${RUNNER_TEMP}/gh-aw/actions/copilot_harness.cjs /usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --no-ask-user --allow-all-tools --add-dir "${GITHUB_WORKSPACE}" --prompt-file /tmp/gh-aw/aw-prompts/prompt.txt' 2>&1 | tee -a /tmp/gh-aw/threat-detection/detection.log
env:
COPILOT_AGENT_RUNNER_TYPE: STANDALONE
COPILOT_API_KEY: dummy-byok-key-for-offline-mode
COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
COPILOT_MODEL: claude-haiku-4.5
GH_AW_PHASE: detection
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
GH_AW_VERSION: dev
GITHUB_API_URL: ${{ github.api_url }}
GITHUB_AW: true
GITHUB_COPILOT_INTEGRATION_ID: agentic-workflows
GITHUB_HEAD_REF: ${{ github.head_ref }}
GITHUB_REF_NAME: ${{ github.ref_name }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_STEP_SUMMARY: /tmp/gh-aw/agent-step-summary.md
GITHUB_WORKSPACE: ${{ github.workspace }}
GIT_AUTHOR_EMAIL: github-actions[bot]@users.noreply.github.com
GIT_AUTHOR_NAME: github-actions[bot]
GIT_COMMITTER_EMAIL: github-actions[bot]@users.noreply.github.com
GIT_COMMITTER_NAME: github-actions[bot]
XDG_CONFIG_HOME: /home/runner
- name: Upload threat detection log
if: always() && steps.detection_guard.outputs.run_detection == 'true'
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: detection
path: /tmp/gh-aw/threat-detection/detection.log
if-no-files-found: ignore
# Parse detection.log and publish the verdict. Runs even when detection was
# skipped (if: always()); the try/catch guards against the parser module
# itself failing to load, downgrading to a warning when
# GH_AW_DETECTION_CONTINUE_ON_ERROR is not "false".
- name: Parse and conclude threat detection
id: detection_conclusion
if: always()
continue-on-error: true
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
env:
RUN_DETECTION: ${{ steps.detection_guard.outputs.run_detection }}
GH_AW_DETECTION_CONTINUE_ON_ERROR: "true"
with:
script: |
try {
const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io, getOctokit);
const { main } = require('${{ runner.temp }}/gh-aw/actions/parse_threat_detection_results.cjs');
await main();
} catch (loadErr) {
const continueOnError = process.env.GH_AW_DETECTION_CONTINUE_ON_ERROR !== 'false';
const msg = 'ERR_SYSTEM: \u274C Unexpected error loading threat detection module: ' + (loadErr && loadErr.message ? loadErr.message : String(loadErr));
core.error(msg);
core.setOutput('reason', 'parse_error');
if (continueOnError) {
core.warning('\u26A0\uFE0F ' + msg);
core.setOutput('conclusion', 'warning');
core.setOutput('success', 'false');
} else {
core.setOutput('conclusion', 'failure');
core.setOutput('success', 'false');
core.setFailed(msg);
}
}
# Gate job: decides whether the workflow activates at all. It checks team
# membership and three skip conditions, then searches for candidate issues.
# The "activated" output is true only when ALL four checks pass.
pre_activation:
runs-on: ubuntu-slim
permissions:
contents: read
issues: read
pull-requests: read
outputs:
activated: ${{ steps.check_membership.outputs.is_team_member == 'true' && steps.check_skip_if_match.outputs.skip_check_ok == 'true' && steps.check_skip_if_no_match.outputs.skip_no_match_check_ok == 'true' && steps.check_skip_if_check_failing.outputs.skip_if_check_failing_ok == 'true' }}
has_issues: ${{ steps.search.outputs.has_issues }}
issue_context: ${{ steps.search.outputs.issue_context }}
issue_count: ${{ steps.search.outputs.issue_count }}
issue_list: ${{ steps.search.outputs.issue_list }}
issue_numbers: ${{ steps.search.outputs.issue_numbers }}
matched_command: ''
search_result: ${{ steps.search.outcome }}
setup-trace-id: ${{ steps.setup.outputs.trace-id }}
steps:
# Sparse-checkout only the shared gh-aw actions folder (credentials not
# persisted since nothing here pushes).
- name: Checkout actions folder
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
repository: github/gh-aw
sparse-checkout: |
actions
persist-credentials: false
- name: Setup Scripts
id: setup
uses: ./actions/setup
with:
destination: ${{ runner.temp }}/gh-aw/actions
job-name: ${{ github.job }}
env:
GH_AW_SETUP_WORKFLOW_NAME: "Issue Monster"
GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/issue-monster.lock.yml@${{ github.ref }}
GH_AW_INFO_VERSION: "1.0.40"
# Require the triggering actor to have write, maintainer, or admin access.
- name: Check team membership for workflow
id: check_membership
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
env:
GH_AW_REQUIRED_ROLES: "admin,maintainer,write"
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io, getOctokit);
const { main } = require('${{ runner.temp }}/gh-aw/actions/check_membership.cjs');
await main();
# Skip the run if Copilot already has 5+ open draft PRs in flight.
- name: Check skip-if-match query
id: check_skip_if_match
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
env:
GH_AW_SKIP_QUERY: "is:pr is:open is:draft author:app/copilot-swe-agent"
GH_AW_WORKFLOW_NAME: "Issue Monster"
GH_AW_SKIP_MAX_MATCHES: "5"
with:
script: |
const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io, getOctokit);
const { main } = require('${{ runner.temp }}/gh-aw/actions/check_skip_if_match.cjs');
await main();
# Skip the run if there are no open issues at all to assign.
- name: Check skip-if-no-match query
id: check_skip_if_no_match
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
env:
GH_AW_SKIP_QUERY: "is:issue is:open"
GH_AW_WORKFLOW_NAME: "Issue Monster"
GH_AW_SKIP_MIN_MATCHES: "1"
with:
script: |
const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io, getOctokit);
const { main } = require('${{ runner.temp }}/gh-aw/actions/check_skip_if_no_match.cjs');
await main();
# Skip the run if any of the listed CI checks are failing (pending checks
# are tolerated via GH_AW_SKIP_CHECK_ALLOW_PENDING).
- name: Check skip-if-check-failing
id: check_skip_if_check_failing
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
env:
GH_AW_SKIP_CHECK_INCLUDE: "[\"build\",\"test\",\"lint-go\",\"lint-js\"]"
GH_AW_SKIP_CHECK_ALLOW_PENDING: "true"
with:
script: |
const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io, getOctokit);
const { main } = require('${{ runner.temp }}/gh-aw/actions/check_skip_if_check_failing.cjs');
await main();
# Inline search script: filters and scores candidate issues (body below).
- name: Search for candidate issues
id: search
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9
with:
script: |
const { owner, repo } = context.repo;
const MAX_ISSUES_WITH_BODY_CONTEXT = 8;
const BODY_SNIPPET_MAX_LENGTH = 600;
try {
// Check for recent rate-limited PRs to avoid scheduling more work during rate limiting
core.info('Checking for recent rate-limited PRs...');
const rateLimitCheckDate = new Date();
rateLimitCheckDate.setHours(rateLimitCheckDate.getHours() - 1); // Check last hour
// Format as YYYY-MM-DDTHH:MM:SS for GitHub search API
const rateLimitCheckISO = rateLimitCheckDate.toISOString().split('.')[0] + 'Z';
const recentPRsQuery = `is:pr author:app/copilot-swe-agent created:>${rateLimitCheckISO} repo:${owner}/${repo}`;
const recentPRsResponse = await github.rest.search.issuesAndPullRequests({
q: recentPRsQuery,
per_page: 10,
sort: 'created',
order: 'desc'
});
core.info(`Found ${recentPRsResponse.data.total_count} recent Copilot PRs to check for rate limiting`);
// Check if any recent PRs have rate limit indicators
let rateLimitDetected = false;
for (const pr of recentPRsResponse.data.items) {
try {
const prTimelineQuery = `
query($owner: String!, $repo: String!, $number: Int!) {
repository(owner: $owner, name: $repo) {
pullRequest(number: $number) {
timelineItems(first: 50, itemTypes: [ISSUE_COMMENT]) {
nodes {
__typename
... on IssueComment {
body
createdAt
}
}
}
}
}
}
`;
const prTimelineResult = await github.graphql(prTimelineQuery, {
owner,
repo,
number: pr.number
});
const comments = prTimelineResult?.repository?.pullRequest?.timelineItems?.nodes || [];
const rateLimitPattern = /rate limit|API rate limit|secondary rate limit|abuse detection|429|too many requests/i;
for (const comment of comments) {
if (comment.body && rateLimitPattern.test(comment.body)) {
core.warning(`Rate limiting detected in PR #${pr.number}: ${comment.body.substring(0, 200)}`);
rateLimitDetected = true;
break;
}
}
if (rateLimitDetected) break;
} catch (error) {
core.warning(`Could not check PR #${pr.number} for rate limiting: ${error.message}`);
}
}
if (rateLimitDetected) {
core.warning('🛑 Rate limiting detected in recent PRs. Skipping issue assignment to prevent further rate limit issues.');
core.setOutput('issue_count', 0);
core.setOutput('issue_numbers', '');
core.setOutput('issue_list', '');
core.setOutput('issue_context', '');
core.setOutput('has_issues', 'false');
return;
}
core.info('✓ No rate limiting detected. Proceeding with issue search.');
// Labels that indicate an issue should NOT be auto-assigned
const excludeLabels = [
'wontfix',
'duplicate',
'invalid',
'question',
'discussion',
'needs-discussion',
'blocked',
'on-hold',
'waiting-for-feedback',
'needs-more-info',
'no-bot',
'no-campaign'
];
// Labels that indicate an issue is a GOOD candidate for auto-assignment
const priorityLabels = [
'good first issue',
'good-first-issue',
'bug',
'enhancement',
'feature',
'documentation',
'tech-debt',
'refactoring',
'performance',
'security'
];
// Search for open issues with "cookie" label and without excluded labels
// The "cookie" label indicates issues that are approved work queue items from automated workflows
const query = `is:issue is:open repo:${owner}/${repo} label:cookie -label:"${excludeLabels.join('" -label:"')}"`;
core.info(`Searching: ${query}`);
const response = await github.rest.search.issuesAndPullRequests({
q: query,
per_page: 100,
sort: 'created',
order: 'desc'
});
core.info(`Found ${response.data.total_count} total issues matching basic criteria`);
// Fetch full details for each issue to get labels, assignees, sub-issues, and linked PRs
// Track integrity-filtered issues to emit a diagnostic summary
const integrityFilteredIssues = [];
const issuesWithDetails = (await Promise.all(
response.data.items.map(async (issue) => {
// Fetch full issue details — some issues may be blocked by integrity policy
let fullIssue;
try {
fullIssue = await github.rest.issues.get({
owner,
repo,
issue_number: issue.number
});
} catch (fetchError) {
// Integrity-filtered issues (403/451) or other transient errors should be
// skipped individually rather than failing the entire batch
const status = fetchError.status || fetchError.response?.status;
// 403 = Forbidden (integrity policy), 451 = Unavailable For Legal Reasons
const isIntegrityBlock = status === 403 || status === 451 ||
/\bintegrity\b/i.test(fetchError.message || '');
const errorSummary = (fetchError.message || String(fetchError)).slice(0, 120);
if (isIntegrityBlock) {
integrityFilteredIssues.push(issue.number);
core.warning(`⚠️ Skipping issue #${issue.number}: blocked by integrity policy (HTTP ${status || 'unknown'}): ${errorSummary}`);
} else {
core.warning(`⚠️ Skipping issue #${issue.number}: could not fetch details (HTTP ${status || 'unknown'}): ${errorSummary}`);
}
return null;
}
// Check if this issue has sub-issues and linked PRs using GraphQL
let subIssuesCount = 0;
let linkedPRs = [];
try {
const issueDetailsQuery = `
query($owner: String!, $repo: String!, $number: Int!) {
repository(owner: $owner, name: $repo) {
issue(number: $number) {
subIssues {
totalCount
}
timelineItems(first: 100, itemTypes: [CROSS_REFERENCED_EVENT]) {
nodes {
... on CrossReferencedEvent {
source {
__typename
... on PullRequest {
number
state
isDraft
author {
login
}
}
}
}
}
}
}
}
}
`;
const issueDetailsResult = await github.graphql(issueDetailsQuery, {
owner,
repo,
number: issue.number
});
subIssuesCount = issueDetailsResult?.repository?.issue?.subIssues?.totalCount || 0;
// Extract linked PRs from timeline
const timelineItems = issueDetailsResult?.repository?.issue?.timelineItems?.nodes || [];
linkedPRs = timelineItems
.filter(item => item?.source?.__typename === 'PullRequest')
.map(item => ({
number: item.source.number,
state: item.source.state,
isDraft: item.source.isDraft,
author: item.source.author?.login
}));
core.info(`Issue #${issue.number} has ${linkedPRs.length} linked PR(s)`);
} catch (error) {
// If GraphQL query fails, continue with defaults
core.warning(`Could not check details for #${issue.number}: ${error.message}`);
}
return {
...fullIssue.data,
subIssuesCount,
linkedPRs
};
})
)).filter(Boolean); // Remove null entries (integrity-filtered or otherwise skipped)
// Emit diagnostic summary for integrity-filtered issues
if (integrityFilteredIssues.length > 0) {
core.warning(`🛡️ Integrity filter diagnostic: ${integrityFilteredIssues.length} issue(s) were skipped due to integrity policy: #${integrityFilteredIssues.join(', #')}. These issues will be excluded from this run.`);
}
// Filter and score issues
const scoredIssues = issuesWithDetails
.filter(issue => {
// Exclude issues that already have assignees
if (issue.assignees && issue.assignees.length > 0) {
core.info(`Skipping #${issue.number}: already has assignees`);
return false;
}
// Exclude issues with excluded labels (double check)
const issueLabels = issue.labels.map(l => l.name.toLowerCase());
if (issueLabels.some(label => excludeLabels.map(l => l.toLowerCase()).includes(label))) {
core.info(`Skipping #${issue.number}: has excluded label`);
return false;
}
// Exclude issues with campaign labels (campaign:*)
// Campaign items are managed by campaign orchestrators
if (issueLabels.some(label => label.startsWith('campaign:'))) {
core.info(`Skipping #${issue.number}: has campaign label (managed by campaign orchestrator)`);
return false;
}
// Exclude issues that have sub-issues (parent/organizing issues)
if (issue.subIssuesCount > 0) {
core.info(`Skipping #${issue.number}: has ${issue.subIssuesCount} sub-issue(s) - parent issues are used for organizing, not tasks`);
return false;
}
// Exclude issues with closed PRs (treat as complete)
const closedPRs = issue.linkedPRs?.filter(pr => pr.state === 'CLOSED' || pr.state === 'MERGED') || [];
if (closedPRs.length > 0) {
core.info(`Skipping #${issue.number}: has ${closedPRs.length} closed/merged PR(s) - treating as complete`);
return false;
}
// Exclude issues with open PRs from Copilot coding agent
const openCopilotPRs = issue.linkedPRs?.filter(pr =>
pr.state === 'OPEN' &&
(pr.author === 'copilot-swe-agent' || pr.author?.includes('copilot'))
) || [];
if (openCopilotPRs.length > 0) {
core.info(`Skipping #${issue.number}: has ${openCopilotPRs.length} open PR(s) from Copilot - already being worked on`);
return false;
}
return true;
})
.map(issue => {
const issueLabels = issue.labels.map(l => l.name.toLowerCase());
let score = 0;
// Score based on priority labels (higher score = higher priority)
if (issueLabels.includes('good first issue') || issueLabels.includes('good-first-issue')) {
score += 50;
}
if (issueLabels.includes('bug')) {
score += 40;
}
if (issueLabels.includes('security')) {
score += 45;
}
if (issueLabels.includes('documentation')) {
score += 35;
}
if (issueLabels.includes('enhancement') || issueLabels.includes('feature')) {
score += 30;
}
if (issueLabels.includes('performance')) {
score += 25;
}
if (issueLabels.includes('tech-debt') || issueLabels.includes('refactoring')) {
score += 20;
}
// Bonus for issues with clear labels (any priority label)
if (issueLabels.some(label => priorityLabels.map(l => l.toLowerCase()).includes(label))) {
score += 10;
}
// Age bonus: older issues get slight priority (days old / 10)
const ageInDays = Math.floor((Date.now() - new Date(issue.created_at)) / (1000 * 60 * 60 * 24));
score += Math.min(ageInDays / 10, 20); // Cap age bonus at 20 points
return {
number: issue.number,
title: issue.title,
labels: issue.labels.map(l => l.name),
body: issue.body,
created_at: issue.created_at,
score
};
})
.sort((a, b) => b.score - a.score); // Sort by score descending
// Format output
const issueList = scoredIssues.map(i => {
const labelStr = i.labels.length > 0 ? ` [${i.labels.join(', ')}]` : '';
return `#${i.number}: ${i.title}${labelStr} (score: ${i.score.toFixed(1)})`;
}).join('\n');
// Pre-fetch compact body context for top candidates so the agent can
// triage without extra reads in most runs.
const issueContext = scoredIssues.slice(0, MAX_ISSUES_WITH_BODY_CONTEXT).map(i => {
const body = (i.body || '').replace(/\s+/g, ' ').trim();
const bodySnippet = body.length > BODY_SNIPPET_MAX_LENGTH ? `${body.slice(0, BODY_SNIPPET_MAX_LENGTH)}…` : body;
const labelStr = i.labels.length > 0 ? i.labels.join(', ') : 'none';
return `#${i.number} | score=${i.score.toFixed(1)} | labels=${labelStr}\nTitle: ${i.title}\nBody: ${bodySnippet || '(no body)'}`;
}).join('\n\n---\n\n');
const issueNumbers = scoredIssues.map(i => i.number).join(',');
core.info(`Total candidate issues after filtering: ${scoredIssues.length}`);
if (scoredIssues.length > 0) {
core.info(`Top candidates:\n${issueList.split('\n').slice(0, 10).join('\n')}`);
}
core.setOutput('issue_count', scoredIssues.length);
core.setOutput('issue_numbers', issueNumbers);
core.setOutput('issue_list', issueList);
core.setOutput('issue_context', issueContext);
if (scoredIssues.length === 0) {
core.info('🍽️ No suitable candidate issues - the plate is empty!');
core.setOutput('has_issues', 'false');
} else {
core.setOutput('has_issues', 'true');
}
} catch (error) {
core.error(`Error searching for issues: ${error.message}`);
core.setOutput('issue_count', 0);
core.setOutput('issue_numbers', '');
core.setOutput('issue_list', '');
core.setOutput('issue_context', '');
core.setOutput('has_issues', 'false');
}
# Post-processing job: materializes the agent's "safe outputs" (comments,
# agent assignments, etc.). Runs only when the agent ran and threat
# detection succeeded.
safe_outputs:
needs:
- activation
- agent
- detection
if: (!cancelled()) && needs.agent.result != 'skipped' && needs.detection.result == 'success'
runs-on: ubuntu-slim
permissions:
contents: read
discussions: write
issues: write
pull-requests: write
timeout-minutes: 15
env:
GH_AW_CALLER_WORKFLOW_ID: "${{ github.repository }}/issue-monster"
GH_AW_DETECTION_CONCLUSION: ${{ needs.detection.outputs.detection_conclusion }}
GH_AW_DETECTION_REASON: ${{ needs.detection.outputs.detection_reason }}
GH_AW_EFFECTIVE_TOKENS: ${{ needs.agent.outputs.effective_tokens }}
GH_AW_ENGINE_ID: "copilot"
GH_AW_ENGINE_MODEL: "claude-haiku-4.5"
GH_AW_ENGINE_VERSION: "1.0.40"
GH_AW_SAFE_OUTPUT_MESSAGES: "{\"footer\":\"\\u003e 🍪 *Om nom nom by [{workflow_name}]({run_url})*{effective_tokens_suffix}{history_link}\",\"runStarted\":\"🍪 ISSUE! ISSUE! [{workflow_name}]({run_url}) hungry for issues on this {event_type}! Om nom nom...\",\"runSuccess\":\"🍪 YUMMY! [{workflow_name}]({run_url}) ate the issues! That was DELICIOUS! Me want MORE! 😋\",\"runFailure\":\"🍪 Aww... [{workflow_name}]({run_url}) {status}. No cookie for monster today... 😢\"}"
GH_AW_WORKFLOW_ID: "issue-monster"
GH_AW_WORKFLOW_NAME: "Issue Monster"
outputs:
assign_to_agent_assigned: ${{ steps.process_safe_outputs.outputs.assign_to_agent_assigned }}
assign_to_agent_assignment_error_count: ${{ steps.process_safe_outputs.outputs.assign_to_agent_assignment_error_count }}
assign_to_agent_assignment_errors: ${{ steps.process_safe_outputs.outputs.assign_to_agent_assignment_errors }}
code_push_failure_count: ${{ steps.process_safe_outputs.outputs.code_push_failure_count }}
code_push_failure_errors: ${{ steps.process_safe_outputs.outputs.code_push_failure_errors }}
comment_id: ${{ steps.process_safe_outputs.outputs.comment_id }}
comment_url: ${{ steps.process_safe_outputs.outputs.comment_url }}
create_discussion_error_count: ${{ steps.process_safe_outputs.outputs.create_discussion_error_count }}
create_discussion_errors: ${{ steps.process_safe_outputs.outputs.create_discussion_errors }}
process_safe_outputs_processed_count: ${{ steps.process_safe_outputs.outputs.processed_count }}
process_safe_outputs_temporary_id_map: ${{ steps.process_safe_outputs.outputs.temporary_id_map }}
steps:
- name: Checkout actions folder
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
repository: github/gh-aw
sparse-checkout: |
actions
persist-credentials: false
- name: Setup Scripts
id: setup
uses: ./actions/setup
with:
destination: ${{ runner.temp }}/gh-aw/actions
job-name: ${{ github.job }}
trace-id: ${{ needs.activation.outputs.setup-trace-id }}
env:
GH_AW_SETUP_WORKFLOW_NAME: "Issue Monster"
GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/issue-monster.lock.yml@${{ github.ref }}
GH_AW_INFO_VERSION: "1.0.40"
# Best-effort download: the agent job may not have uploaded an artifact
# (continue-on-error), in which case later steps are skipped via outcome.
- name: Download agent output artifact
id: download-agent-output
continue-on-error: true
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
name: agent
path: /tmp/gh-aw/
- name: Setup agent output environment variable
id: setup-agent-output-env
if: steps.download-agent-output.outcome == 'success'
run: |
mkdir -p /tmp/gh-aw/
find "/tmp/gh-aw/" -type f -print
echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/agent_output.json" >> "$GITHUB_OUTPUT"
- name: Configure GH_HOST for enterprise compatibility
id: ghes-host-config
shell: bash
run: |
# Derive GH_HOST from GITHUB_SERVER_URL so the gh CLI targets the correct
# GitHub instance (GHES/GHEC). On github.com this is a harmless no-op.
GH_HOST="${GITHUB_SERVER_URL#https://}"
GH_HOST="${GH_HOST#http://}"
echo "GH_HOST=${GH_HOST}" >> "$GITHUB_ENV"
# Dispatch each safe-output item (add_comment, assign_to_agent, noop, …)
# through the shared handler manager, constrained by the handler config.
- name: Process Safe Outputs
id: process_safe_outputs
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
env:
GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
GH_AW_ALLOWED_DOMAINS: "api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.githubcopilot.com,api.individual.githubcopilot.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,github.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,telemetry.enterprise.githubcopilot.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com,www.googleapis.com"
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_API_URL: ${{ github.api_url }}
GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"add_comment\":{\"max\":3,\"target\":\"*\"},\"assign_to_agent\":{\"allowed\":[\"copilot\"],\"max\":3,\"target\":\"*\"},\"create_report_incomplete_issue\":{},\"missing_data\":{},\"missing_tool\":{},\"noop\":{\"max\":1,\"report-as-issue\":\"true\"},\"report_incomplete\":{}}"
GH_AW_ASSIGN_TO_AGENT_TOKEN: ${{ secrets.GH_AW_AGENT_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
with:
github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
script: |
const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io, getOctokit);
const { main } = require('${{ runner.temp }}/gh-aw/actions/safe_output_handler_manager.cjs');
await main();
- name: Upload Safe Outputs Items
if: always()
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: safe-outputs-items
path: |
/tmp/gh-aw/safe-output-items.jsonl
/tmp/gh-aw/temporary-id-map.json
if-no-files-found: ignore