Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 36 additions & 7 deletions cadence/contracts/FlowYieldVaultsAutoBalancers.cdc
Original file line number Diff line number Diff line change
Expand Up @@ -81,22 +81,45 @@ access(all) contract FlowYieldVaultsAutoBalancers {
return nil
}

/// Checks if an AutoBalancer has at least one active (Scheduled) transaction.
/// Checks if an AutoBalancer has at least one active internally-managed transaction.
/// Used by Supervisor to detect stuck yield vaults that need recovery.
///
/// A transaction is considered active when it is:
/// - still `Scheduled`, or
/// - already marked `Executed` by FlowTransactionScheduler and still within a bounded
/// grace period after its scheduled timestamp.
///
/// The second case matters because FlowTransactionScheduler flips status to `Executed`
/// before the handler actually runs. Without treating that in-flight window as active,
/// the Supervisor can falsely classify healthy vaults as stuck and recover them twice.
/// But that window must be bounded: if the handler panics after the optimistic status
/// update, the vault must eventually become recoverable instead of remaining "active"
/// forever.
///
/// @param id: The yield vault/AutoBalancer ID
/// @return Bool: true if there's at least one Scheduled transaction, false otherwise
/// @return Bool: true if there's at least one active internally-managed transaction, false otherwise
///
access(all) fun hasActiveSchedule(id: UInt64): Bool {
let autoBalancer = self.borrowAutoBalancer(id: id)
if autoBalancer == nil {
return false
}

let currentTimestamp = getCurrentBlock().timestamp
let optimisticExecutionGracePeriod: UFix64 = 15.0
let txnIDs = autoBalancer!.getScheduledTransactionIDs()
for txnID in txnIDs {
if autoBalancer!.borrowScheduledTransaction(id: txnID)?.status() == FlowTransactionScheduler.Status.Scheduled {
return true
if let scheduledTxn = autoBalancer!.borrowScheduledTransaction(id: txnID) {
if let status = scheduledTxn.status() {
if status == FlowTransactionScheduler.Status.Scheduled {
return true
}

if status == FlowTransactionScheduler.Status.Executed
&& currentTimestamp <= scheduledTxn.timestamp + optimisticExecutionGracePeriod {
return true
}
}
}
}
return false
Expand All @@ -123,7 +146,7 @@ access(all) contract FlowYieldVaultsAutoBalancers {
return false // Not configured for recurring, can't be "stuck"
}

// Check if there's an active schedule
// Check if there's an active schedule or an in-flight due execution
if self.hasActiveSchedule(id: id) {
return false // Has active schedule, not stuck
}
Expand Down Expand Up @@ -226,8 +249,14 @@ access(all) contract FlowYieldVaultsAutoBalancers {
let scheduleCap = self.account.capabilities.storage
.issue<auth(DeFiActions.Schedule) &DeFiActions.AutoBalancer>(storagePath)

// Register yield vault in registry for global mapping of live yield vault IDs
FlowYieldVaultsSchedulerRegistry.register(yieldVaultID: uniqueID.id, handlerCap: handlerCap, scheduleCap: scheduleCap)
// Register the yield vault in the global scheduler registry.
// Only recurring vaults participate in the Supervisor's stuck-scan ordering.
FlowYieldVaultsSchedulerRegistry.register(
yieldVaultID: uniqueID.id,
handlerCap: handlerCap,
scheduleCap: scheduleCap,
participatesInStuckScan: recurringConfig != nil
)

// Start the native AutoBalancer self-scheduling chain if recurringConfig was provided
// This schedules the first rebalance; subsequent ones are scheduled automatically
Expand Down
74 changes: 56 additions & 18 deletions cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,12 @@ import "UInt64LinkedList"

/// FlowYieldVaultsSchedulerRegistry
///
/// Stores registry of YieldVault IDs and their handler capabilities for scheduling.
/// Stores the global registry of live YieldVault IDs and their scheduling capabilities.
/// This contract maintains:
/// - A registry of all yield vault IDs that participate in scheduled rebalancing
/// - A registry of all live yield vault IDs known to the scheduler infrastructure
/// - Handler capabilities (AutoBalancer capabilities) for each yield vault
/// - A pending queue for yield vaults that need initial seeding or re-seeding
/// - A recurring-only stuck-scan ordering used by the Supervisor
/// - The global Supervisor capability for recovery operations
///
access(all) contract FlowYieldVaultsSchedulerRegistry {
Expand Down Expand Up @@ -48,7 +49,8 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {

/* --- STATE --- */

/// Registry of all yield vault IDs that participate in scheduling
/// Registry of all live yield vault IDs known to the scheduler infrastructure.
/// This is broader than the recurring-only stuck-scan ordering.
access(self) var yieldVaultRegistry: {UInt64: Bool}

/// Handler capabilities (AutoBalancer) for each yield vault - keyed by yield vault ID
Expand All @@ -75,10 +77,12 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
/* --- ACCOUNT-LEVEL FUNCTIONS --- */

/// Register a YieldVault and store its handler and schedule capabilities (idempotent)
/// `participatesInStuckScan` should be true only for vaults that currently have a recurring configuration.
access(account) fun register(
yieldVaultID: UInt64,
handlerCap: Capability<auth(FlowTransactionScheduler.Execute) &{FlowTransactionScheduler.TransactionHandler}>,
scheduleCap: Capability<auth(DeFiActions.Schedule) &DeFiActions.AutoBalancer>
scheduleCap: Capability<auth(DeFiActions.Schedule) &DeFiActions.AutoBalancer>,
participatesInStuckScan: Bool
) {
pre {
handlerCap.check(): "Invalid handler capability provided for yieldVaultID \(yieldVaultID)"
Expand All @@ -87,24 +91,28 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
self.yieldVaultRegistry[yieldVaultID] = true
self.handlerCaps[yieldVaultID] = handlerCap
self.scheduleCaps[yieldVaultID] = scheduleCap
// New vaults go to the head; they haven't executed yet but are freshly registered.

// The registry tracks all live yield vaults, but only recurring vaults
// participate in the Supervisor's stuck-scan ordering.
// If already in the list (idempotent re-register), remove first to avoid duplicates.
let list = self._list()
if list.contains(id: yieldVaultID) {
let _ = list.remove(id: yieldVaultID)
}
list.insertAtHead(id: yieldVaultID)
if participatesInStuckScan {
list.insertAtHead(id: yieldVaultID)
}
emit YieldVaultRegistered(yieldVaultID: yieldVaultID)
}

/// Called on every execution. Moves yieldVaultID to the head (most recently executed)
/// so the Supervisor scans from the tail (least recently executed) for stuck detection — O(1).
/// If the list entry is unexpectedly missing, reinsert it to restore the ordering structure.
/// Called on every execution. Moves scan-participating yieldVaultID to the head
/// (most recently executed) so the Supervisor scans recurring participants from the tail
/// (least recently executed) for stuck detection — O(1).
access(account) fun reportExecution(yieldVaultID: UInt64) {
if !(self.yieldVaultRegistry[yieldVaultID] ?? false) {
let list = self._list()
if !(self.yieldVaultRegistry[yieldVaultID] ?? false) || !list.contains(id: yieldVaultID) {
return
}
let list = self._list()
let _ = list.remove(id: yieldVaultID)
list.insertAtHead(id: yieldVaultID)
}
Expand Down Expand Up @@ -211,13 +219,43 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
return self.pendingQueue.length
}

/// Returns up to `limit` vault IDs starting from the tail (least recently executed).
/// Supervisor should only scan these for stuck detection instead of all registered vaults.
/// @param limit: Maximum number of IDs to return (caller typically passes MAX_BATCH_SIZE)
access(all) fun getStuckScanCandidates(limit: UInt): [UInt64] {
return self.account.storage
.borrow<&UInt64LinkedList.List>(from: self.executionListStoragePath)!
.tailWalk(limit: limit)
/// Inspects up to `limit` tail entries from the recurring stuck-scan ordering, lazily prunes
/// stale non-recurring entries, and returns the recurring participants encountered.
/// Repeated calls keep making forward progress without unbounded work.
/// NOTE: re-enabling recurring scheduling for a previously pruned vault is not handled here.
/// TODO: add an explicit rejoin path for recurring off -> on in a follow-up PR.
/// This is intentionally account-restricted because it mutates registry state as it prunes.
/// @param limit: Maximum number of tail entries to inspect in this call
access(account) fun pruneAndGetStuckScanCandidates(limit: UInt): [UInt64] {
let list = self._list()
var result: [UInt64] = []
var inspected: UInt = 0
var current = list.tail
while inspected < limit {
if let id = current {
inspected = inspected + 1
let previous = list.nodes[id]?.prev
let scheduleCap = self.scheduleCaps[id]
let isRecurringParticipant =
scheduleCap != nil
&& scheduleCap!.check()
&& scheduleCap!.borrow()?.getRecurringConfig() != nil

if isRecurringParticipant {
result.append(id)
} else {
// Current behavior: removing recurring config prunes the vault from the
// stuck-scan ordering. Re-enabling recurring later still needs an explicit
// rejoin path and is intentionally left to a follow-up PR.
self.dequeuePending(yieldVaultID: id)
let _ = list.remove(id: id)
}
current = previous
} else {
break
}
}
return result
}

/// Get global Supervisor capability, if set
Expand Down
9 changes: 6 additions & 3 deletions cadence/contracts/FlowYieldVaultsSchedulerV1.cdc
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,8 @@ access(all) contract FlowYieldVaultsSchedulerV1 {
/// Detects and recovers stuck yield vaults by directly calling their scheduleNextRebalance().
///
/// Detection methods:
/// 1. State-based: Scans for registered yield vaults with no active schedule that are overdue
/// 1. State-based: Scans recurring yield vaults in stuck-scan order for candidates that
///    have no active schedule and are overdue
///
/// Recovery method:
/// - Uses Schedule capability to call AutoBalancer.scheduleNextRebalance() directly
Expand All @@ -172,7 +173,7 @@ access(all) contract FlowYieldVaultsSchedulerV1 {
/// "priority": UInt8 (0=High,1=Medium,2=Low) - for Supervisor self-rescheduling
/// "executionEffort": UInt64 - for Supervisor self-rescheduling
/// "recurringInterval": UFix64 (for Supervisor self-rescheduling)
/// "scanForStuck": Bool (default true - scan up to MAX_BATCH_SIZE least-recently-executed vaults for stuck ones)
/// "scanForStuck": Bool (default true - scan up to MAX_BATCH_SIZE least-recently-executed recurring scan participants for stuck ones)
/// }
access(FlowTransactionScheduler.Execute) fun executeTransaction(id: UInt64, data: AnyStruct?) {
let cfg = data as? {String: AnyStruct} ?? {}
Expand All @@ -186,7 +187,9 @@ access(all) contract FlowYieldVaultsSchedulerV1 {

// STEP 1: State-based detection - scan for stuck yield vaults
if scanForStuck {
let candidates = FlowYieldVaultsSchedulerRegistry.getStuckScanCandidates(limit: UInt(FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE))
let candidates = FlowYieldVaultsSchedulerRegistry.pruneAndGetStuckScanCandidates(
limit: UInt(FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE)
)
for yieldVaultID in candidates {
if FlowYieldVaultsAutoBalancers.isStuckYieldVault(id: yieldVaultID) {
FlowYieldVaultsSchedulerRegistry.enqueuePending(yieldVaultID: yieldVaultID)
Expand Down
8 changes: 4 additions & 4 deletions cadence/scripts/flow-yield-vaults/has_active_schedule.cdc
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
import "FlowYieldVaultsAutoBalancers"

/// Returns true if the yield vault/AutoBalancer has at least one active (Scheduled) transaction.
/// Used to verify that healthy yield vaults maintain their scheduling chain.
/// Returns true if the yield vault/AutoBalancer has at least one active internally-managed
/// transaction. A transaction counts as active while it is `Scheduled`, or once it is
/// `Executed` but still within the optimistic-execution grace period.
///
/// @param yieldVaultID: The YieldVault/AutoBalancer ID
/// @return Bool: true if there's at least one Scheduled transaction, false otherwise
/// @return Bool: true if there's at least one active internally-managed transaction, false otherwise
///
access(all) fun main(yieldVaultID: UInt64): Bool {
return FlowYieldVaultsAutoBalancers.hasActiveSchedule(id: yieldVaultID)
}

Loading
Loading