Limit enforcement of subclassed resources

from oslo_limit import limit


def create_server(self, context, server):
    claim_quantity = server.get('cores')
    claim = limit.ProjectClaim('cores', context.project_id, quantity=claim_quantity)

    # Verification after executing the claim is the default behavior, but calling it out
    # explicitly here for the sake of the example.
    with limit.Enforcer(claim, callback=self.calculate_cores_for_project, verify=True) as enforcer:
        enforcer.resource_cleanup(self.delete_server)
        reference = driver.create_server(server)
        enforcer.filled_claim(reference)
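
# A minimal sketch of the usage callback assumed by the examples here. The signature and
# return value are assumptions for illustration only; the proposal does not pin them down.
# The idea is that the callback reports the project's current usage of a single resource so
# the enforcer can add the claimed quantity on top of it before comparing against the limit.
def calculate_cores_for_project(self, project_id):
    # Hypothetical helper: sum the cores of all servers owned by the project.
    servers = driver.list_servers(project_id=project_id)
    return sum(s.get('cores', 0) for s in servers)
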
from oslo_limit import limit


def create_server(self, context, server):
    claim_quantity = server.get('cores')
    claim = limit.ProjectClaim('cores', context.project_id, quantity=claim_quantity)
    reference = None
    try:
        with limit.Enforcer(claim, callback=self.calculate_cores_for_project, verify=False):
            # If another thread completes its own claim while we're inside the context
            # manager, the project can end up over limit because we've turned off
            # verification.
            reference = driver.create_server(server)
    except limit.ClaimExceedLimit:
        # The claim may have been made, but between the point we called .create_server and
        # the __exit__ of the context manager another thread beat us to the punch. We should
        # clean up the created resource, if any, and re-raise the exception.
        if reference is not None:
            self.delete_server(reference['id'])
        raise

# example of associating a callback with each claimed resource
from oslo_limit import limit


def create_server(self, context, server):
    claim = limit.ProjectClaim(context.project_id)
    core_quantity = server.get('cores')
    claim.add_resource('cores', core_quantity, self._calculate_cores_for_project)
    ram_quantity = server.get('ram_mb')
    claim.add_resource('ram_mb', ram_quantity, self._calculate_ram_for_project)
    instance_quantity = len(server)
    claim.add_resource('instances', instance_quantity, self._calculate_instances_for_project)

    # Verification after executing the claim is the default behavior, but calling it out
    # explicitly here for the sake of the example.
    with limit.Enforcer(claim, verify=True) as enforcer:
        enforcer.resource_cleanup(self.delete_server)
        reference = driver.create_server(server)
        enforcer.filled_claim(reference)

# example associating the callback with the enforcer, making the callback responsible for
# providing usage information for each claimed resource
from oslo_limit import limit


def create_server(self, context, server):
    claim = limit.ProjectClaim(context.project_id)
    core_quantity = server.get('cores')
    claim.add_resource('cores', core_quantity)
    ram_quantity = server.get('ram_mb')
    claim.add_resource('ram_mb', ram_quantity)
    instance_quantity = len(server)
    claim.add_resource('instances', instance_quantity)

    # Verification after executing the claim is the default behavior, but calling it out
    # explicitly here for the sake of the example.
    with limit.Enforcer(claim, callback=self._calculate_instance_resource_for_project, verify=True) as enforcer:
        enforcer.resource_cleanup(self.delete_server)
        reference = driver.create_server(server)
        enforcer.filled_claim(reference)
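
# One possible shape for the enforcer-level callback used above. This is an assumption for
# illustration only: the callback returns current usage for every resource in the claim as
# a dict keyed by resource name, so the enforcer can compare usage plus the claimed
# quantities against the registered limits in keystone.
def _calculate_instance_resource_for_project(self, project_id):
    # Hypothetical helper; any per-resource counting mechanism would do.
    servers = driver.list_servers(project_id=project_id)
    return {
        'cores': sum(s.get('cores', 0) for s in servers),
        'ram_mb': sum(s.get('ram_mb', 0) for s in servers),
        'instances': len(servers),
    }
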
from oslo_limit import limit


def create_server(self, context, server):
    # 'cores' is a registered limit in keystone
    core_quantity = server.get('cores')
    core_claim = limit.ProjectClaim('cores', context.project_id, quantity=core_quantity)

    # 'ram_mb' is a registered limit in keystone
    ram_quantity = server.get('ram_mb')
    ram_claim = limit.ProjectClaim('ram_mb', context.project_id, quantity=ram_quantity)

    # It's possible for a single resource to consume multiple "sub" resources when it is
    # built. The alternative to this would be to count servers as being the resource in
    # question.
    claims = [core_claim, ram_claim]

    # Verification after executing the claim is the default behavior, but calling it out
    # explicitly here for the sake of the example.
    with limit.Enforcer(claims, callback=self.calculate_cores_for_project, verify=True) as enforcer:
        enforcer.resource_cleanup(self.delete_server)
        reference = driver.create_server(server)
        enforcer.filled_claim(reference)

from oslo_limit import limit


def create_server(self, context, server):
    # Build a claim for the resources being asked for in the request
    claim = limit.ProjectClaim('gpu_cores', context.project_id, quantity=8)

    # Add a "parent" resource type to the claim, ultimately saying 'gpu_cores' is a
    # subclass of 'cores'. Also supply a usage callback so that oslo.limit can
    # incorporate existing usage into the enforcement check.
    claim.add_parent_resource_type('cores', self.calculate_cores_for_project)

    # Attempt to claim the resources within the context manager. The context manager
    # will query keystone for 'cores' and 'gpu_cores' limit information. Then it can
    # calculate project usage for both resources using the callbacks provided.
    #
    # If:
    #   - gpu_core_claim + gpu_core_usage + core_usage <= cores limit in keystone AND
    #   - gpu_core_claim <= gpu_cores limit in keystone THEN
    # the resources in the request should be claimed.
    with limit.Enforcer(claim, callback=self.calculate_gpu_cores_for_project):
        driver.create_server(server)
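
# A worked illustration of the check described above, with made-up numbers. Suppose the
# registered limits in keystone are cores=32 and gpu_cores=8, current usage is
# core_usage=20 and gpu_core_usage=0, and the request claims gpu_core_claim=8. Then:
#
#   8 + 0 + 20 = 28 <= 32  (cores limit)      -> passes
#   8 <= 8                 (gpu_cores limit)  -> passes
#
# so the claim would be allowed. Had core_usage been 26 instead, the first check would fail
# (8 + 0 + 26 = 34 > 32) and the enforcer would reject the request even though the
# 'gpu_cores' limit itself is not exceeded.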