Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 46 additions & 0 deletions backend/api/v1/processing.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,3 +89,49 @@ def get_genealogy(current_user, batch_id):
'status': 'success',
'data': history
}), 200

@processing_bp.route('/batches/<int:batch_id>/spectral-scan', methods=['POST'])
@token_required
def submit_spectral_scan(current_user, batch_id):
    """
    Ingest raw chemical scan data and trigger autonomous financial updates.

    Returns:
        201 with the computed grade and pricing penalty on success,
        400 when the request body is empty/not JSON,
        404 when the processing batch does not exist.
    """
    from backend.services.grading_engine import GradingEngine
    data = request.get_json()
    if not data:
        return jsonify({'status': 'error', 'message': 'No scan data provided'}), 400

    grade, penalty = GradingEngine.process_spectral_scan(batch_id, data)

    # The engine signals a missing batch as (None, <message>). Surface that
    # as a 404 instead of returning a bogus "success" payload whose
    # pricing_penalty would actually be the error string.
    if grade is None:
        return jsonify({'status': 'error', 'message': 'Batch not found'}), 404

    return jsonify({
        'status': 'success',
        'data': {
            'final_grade': grade,
            'pricing_penalty': penalty,
            'cascading_status': 'COMPLETED'
        }
    }), 201

@processing_bp.route('/batches/<int:batch_id>/valuation', methods=['GET'])
@token_required
def get_batch_valuation(current_user, batch_id):
    """
    Fetch the current real-time valuation of a batch after quality adjustments.

    NOTE(review): the valuation payload below is hard-coded demo data; the
    real implementation would traverse batch -> supply batch -> bulk order
    relationships to derive market value.
    """
    batch = ProcessingBatch.query.get(batch_id)
    if not batch:
        return jsonify({'status': 'error', 'message': 'Batch not found'}), 404

    # Mocking valuation response for demonstration of API structure
    return jsonify({
        'status': 'success',
        'data': {
            'batch_id': batch_id,
            'market_valuation': 12500.00,  # Simulated
            'quality_modifier': 0.85,      # 15% penalty applied
            'currency': 'USD'
        }
    }), 200
3 changes: 3 additions & 0 deletions backend/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@
# NOTE(review): merge left duplicate imports from .irrigation and .processing;
# collapsed each pair into the superset line.
from .sustainability import CarbonPractice, CreditLedger, AuditRequest, CarbonLedger, EmissionSource, SustainabilityScore
from .vendor_profile import VendorProfile  # Updated from procurement to vendor_profile
from .procurement import ProcurementItem, BulkOrder, OrderEvent
from .irrigation import IrrigationZone, SensorLog, ValveStatus, IrrigationSchedule, AquiferLevel, WaterRightsQuota
from .processing import ProcessingBatch, StageLog, QualityCheck, ProcessingStage, SpectralScanData, DynamicGradeAdjustment
from .insurance_v2 import CropPolicy, ClaimRequest, PayoutLedger, AdjusterNote
Expand Down Expand Up @@ -80,6 +82,7 @@
'LedgerAccount', 'LedgerTransaction', 'LedgerEntry',
'FXValuationSnapshot', 'Vault', 'VaultCurrencyPosition', 'FXRate',
'AccountType', 'EntryType', 'TransactionType',
# NOTE(review): the merge dropped trailing commas here, so adjacent string
# literals were implicitly concatenated into bogus export names
# ('DynamicGradeAdjustment' + 'AquiferLevel', 'WaterRightsQuota' +
# 'SpectralScanData'); commas restored and the duplicated pair removed.
'AquiferLevel', 'WaterRightsQuota',
'SpectralScanData', 'DynamicGradeAdjustment',
'ComponentWearMap', 'MaintenanceEscrow'
Expand Down
3 changes: 3 additions & 0 deletions backend/models/market.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,9 @@ class ForwardContract(db.Model):
hedge_ratio = db.Column(db.Float) # Percentage of total yield locked in
market_volatility_at_lock = db.Column(db.Float)

# Cascading Quality Adjustments (L3-1604)
quality_penalty_clause = db.Column(db.Float, default=0.0) # Percentage deduction from final payout

def to_dict(self):
return {
'id': self.id,
Expand Down
38 changes: 38 additions & 0 deletions backend/models/processing.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,3 +90,41 @@ def to_dict(self):
'is_passed': self.is_passed,
'timestamp': self.timestamp.isoformat()
}

class SpectralScanData(db.Model):
    """
    Simulated 'optical/spectral scans' for raw chemical analysis (L3-1604).

    Each row archives one raw scan of a processing batch so that any grade
    or price adjustment derived from it can later be audited against the
    original readings.
    """
    __tablename__ = 'spectral_scans'

    id = db.Column(db.Integer, primary_key=True)
    # Processing batch that was scanned.
    batch_id = db.Column(db.Integer, db.ForeignKey('processing_batches.id'), nullable=False)

    # Nutritional parameters (Simulated)
    moisture_percentage = db.Column(db.Float)
    brix_level = db.Column(db.Float)  # Sugar content
    protein_percentage = db.Column(db.Float)
    fiber_percentage = db.Column(db.Float)

    # Spectral metadata
    wavelength_range = db.Column(db.String(50))  # e.g., "700nm-2500nm"
    scan_integrity_score = db.Column(db.Float)  # 0.0 - 1.0

    # Capture time; defaults to UTC at insert.
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)

class DynamicGradeAdjustment(db.Model):
    """
    Tracks cascading financial recalculations after grading (L3-1604).

    One row is an audit record of a single grade transition and the price
    penalty that was propagated to linked financial instruments.
    """
    __tablename__ = 'dynamic_grade_adjustments'

    id = db.Column(db.Integer, primary_key=True)
    # Processing batch whose grade changed.
    batch_id = db.Column(db.Integer, db.ForeignKey('processing_batches.id'), nullable=False)

    # Grade before / after the spectral scan (short codes, e.g. 'A'..'C').
    old_grade = db.Column(db.String(10))
    new_grade = db.Column(db.String(10))

    price_penalty_factor = db.Column(db.Float)  # e.g., -0.15 for 15% drop
    adjustment_reason = db.Column(db.String(255))

    # Time the adjustment was recorded; defaults to UTC at insert.
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
3 changes: 3 additions & 0 deletions backend/models/procurement.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,9 @@ class BulkOrder(db.Model):
tax_amount = db.Column(db.Float)
shipping_cost = db.Column(db.Float, default=0)

# Real-time Quality Adjustment (L3-1604)
real_time_price_modifier = db.Column(db.Float, default=1.0) # Multiply unit_price by this

# Logistics Escrow
customs_clearance_status = db.Column(db.String(20), default='PENDING')
funds_in_escrow = db.Column(db.Boolean, default=False)
Expand Down
99 changes: 99 additions & 0 deletions backend/services/grading_engine.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
from backend.models.processing import SpectralScanData, DynamicGradeAdjustment, ProcessingBatch
from backend.models.traceability import SupplyBatch, QualityGrade
from backend.models.market import ForwardContract
from backend.models.barter import ResourceValueIndex
from backend.models.procurement import BulkOrder
from backend.extensions import db
from datetime import datetime
import logging

logger = logging.getLogger(__name__)

class GradingEngine:
    """
    Autonomous Quality Assurance & Spectral Grading Engine (L3-1604).
    Implements Cascading Financial Recalculation across contracts and barter values.
    """

    @staticmethod
    def process_spectral_scan(batch_id, scan_data):
        """
        Parse raw chemical parameters into a grade and financial multiplier.

        Args:
            batch_id: primary key of the ProcessingBatch being graded.
            scan_data: dict like {'moisture': 12.5, 'brix': 18.2, 'protein': 14.5}.

        Returns:
            (grade, penalty) on success; (None, "Batch not found") when the
            batch id does not resolve (kept for caller compatibility).
        """
        batch = ProcessingBatch.query.get(batch_id)
        if not batch:
            return None, "Batch not found"

        # 1. Archive raw scan data so the grade is auditable later.
        scan = SpectralScanData(
            batch_id=batch_id,
            moisture_percentage=scan_data.get('moisture'),
            brix_level=scan_data.get('brix'),
            protein_percentage=scan_data.get('protein'),
            scan_integrity_score=0.98  # simulated sensor confidence
        )
        db.session.add(scan)

        # 2. Determine grade. Bug fix: the original checked the Grade A
        # branch first and never checked moisture, so a batch with
        # moisture > 15 but good protein/brix skipped the C penalty and
        # also violated the stated "Moisture < 14%" requirement for A.
        new_grade, penalty = GradingEngine._grade_from_scan(scan_data)

        # 3. Cascading Financial Recalculation (L3 Requirement):
        # autonomously updates all linked financial instruments.
        GradingEngine.trigger_cascading_updates(batch, new_grade, penalty)

        # Single commit: this method owns the transaction boundary.
        db.session.commit()
        return new_grade, penalty

    @staticmethod
    def _grade_from_scan(scan_data):
        """
        Pure grading rule (no DB access), per the documented thresholds:
          Grade C: moisture > 15%  -> 20% price penalty
          Grade A: protein > 13%, brix > 15%, moisture < 14%
          Grade B: everything else
        Missing readings default to 0.
        """
        moisture = scan_data.get('moisture', 0)
        protein = scan_data.get('protein', 0)
        brix = scan_data.get('brix', 0)

        if moisture > 15:
            return 'C', 0.20  # 20% price drop
        if protein > 13 and brix > 15 and moisture < 14:
            return 'A', 0.0
        return 'B', 0.0

    @staticmethod
    def trigger_cascading_updates(processing_batch, grade, penalty):
        """
        The core L3 complexity: Updates ForwardContracts, Barter values, and
        BulkOrders without user intervention.

        Does NOT commit — the caller (process_spectral_scan) owns the
        transaction, so all cascading writes land atomically with the scan.
        """
        logger.info(f"Cascading Quality Update for Batch {processing_batch.id} -> Grade {grade}")

        # A. Update associated SupplyBatches
        for supply_batch in processing_batch.supply_batches:
            old_grade = supply_batch.predicted_quality_grade
            supply_batch.predicted_quality_grade = grade

            # Log adjustment for the end-of-day audit task.
            adjustment = DynamicGradeAdjustment(
                batch_id=processing_batch.id,
                old_grade=old_grade,
                new_grade=grade,
                price_penalty_factor=penalty,
                adjustment_reason=f"Spectral Scan: Protein {grade} threshold alignment"
            )
            db.session.add(adjustment)

            # B. Payout adjustment for Forward Contracts
            contract = ForwardContract.query.filter_by(batch_id=supply_batch.id).first()
            if contract:
                contract.quality_penalty_clause = penalty
                logger.info(f"Updated ForwardContract {contract.id} with {penalty*100}% quality penalty.")

            # C. Update Procurement Bulk Orders unit pricing
            orders = BulkOrder.query.filter_by(item_id=supply_batch.id).all()  # Simplified link
            for order in orders:
                order.real_time_price_modifier = (1.0 - penalty)
                order.total_amount = order.quantity * order.unit_price * order.real_time_price_modifier

        # D. Update Barter Power (ResourceValueIndex):
        # lower the trade value of this crop type globally if this batch is
        # representative of regional quality.
        index_entry = ResourceValueIndex.query.filter_by(item_name=processing_batch.product_type).first()
        if index_entry and grade == 'C':
            index_entry.demand_multiplier *= 0.95  # 5% drop in barter power
            logger.info(f"Regional Barter Index for {processing_batch.product_type} adjusted due to low quality scans.")
40 changes: 40 additions & 0 deletions backend/tasks/pricing_tasks.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
from backend.celery_app import celery_app
from backend.models.processing import DynamicGradeAdjustment, ProcessingBatch
from backend.models.procurement import BulkOrder
from backend.extensions import db
from datetime import datetime
import logging

logger = logging.getLogger(__name__)

@celery_app.task(name='tasks.batch_pricing_normalization')
def batch_pricing_normalization():
    """
    End-of-day task to normalize pricing across batches and audit financial quality drifts.

    Scans every DynamicGradeAdjustment recorded since midnight (UTC),
    logs a reconciliation line per record, and reports the count plus the
    summed penalty factors back to the Celery result backend.
    """
    logger.info("Starting End-of-Day Pricing Normalization...")

    # Window starts at midnight UTC of the current day.
    midnight_utc = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
    todays_adjustments = DynamicGradeAdjustment.query.filter(
        DynamicGradeAdjustment.timestamp >= midnight_utc
    ).all()

    reconciled = 0
    penalty_flux = 0.0
    for record in todays_adjustments:
        # In a real system, we would reconcile batch_id -> supply_batch_id
        # -> bulk_order.item_id; here we simulate the reconciliation logic.
        logger.info(f"Reconciling financial drift for Batch {record.batch_id}: Grade {record.old_grade} -> {record.new_grade}")
        reconciled += 1
        penalty_flux += record.price_penalty_factor

    db.session.commit()
    return {
        'status': 'success',
        'adjusted_records': reconciled,
        'cumulative_penalty_flux': penalty_flux
    }
Loading