Created
April 24, 2013 15:12
-
-
Save vas6ili/5452886 to your computer and use it in GitHub Desktop.
A simple log4net appender that writes buffered log entries to Azure Table Storage.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
using System; | |
using System.Collections.Generic; | |
using log4net.Core; | |
using Microsoft.WindowsAzure.Storage.Table; | |
namespace WindowsAzure.Logging
{
    /// <summary>
    /// Builds Azure Table Storage entities from log4net logging events.
    /// A dedicated <see cref="TableEntity" />-derived class was replaced by
    /// <see cref="DynamicTableEntity" /> (a property dictionary) to avoid the
    /// reflection cost of TableEntity, trading away some type safety.
    /// </summary>
    public static class LogTableEntity
    {
        /// <summary>Column (property) names used for the stored log entity.</summary>
        public static class Columns
        {
            public const string EventTimestamp = "EventTimestamp";
            public const string Logger = "Logger";
            public const string Level = "Level";
            public const string LevelName = "LevelName";
            public const string Message = "Message";
            public const string Exception = "Exception";
            public const string Thread = "Thread";
            public const string Role = "Role";
            public const string RoleInstance = "RoleInstance";
            public const string DeploymentId = "DeploymentId";
        }

        /// <summary>
        /// Builds a row key of the form "{logger}__{timestamp}__{guid}".
        /// The trailing GUID guarantees uniqueness for events logged within the same second.
        /// </summary>
        public static string CreateRowKey(LoggingEvent entry)
        {
            string stamp = entry.TimeStamp.ToString("yyyyMMddHHmmss");
            string unique = Guid.NewGuid().ToString("n");
            return entry.LoggerName + "__" + stamp + "__" + unique;
        }

        /// <summary>
        /// Builds a partition key from the event timestamp with hour precision,
        /// grouping each hour's events into one partition.
        /// </summary>
        public static string CreatePartitionKey(LoggingEvent entry)
        {
            return entry.TimeStamp.ToString("yyyyMMddHH");
        }

        /// <summary>
        /// Converts a logging event into a <see cref="DynamicTableEntity" />
        /// carrying timestamp, logger, level, thread, message and exception columns.
        /// </summary>
        public static DynamicTableEntity CreateTableEntity(LoggingEvent entry)
        {
            // Build the property bag first, then wrap it in the entity.
            var properties = new Dictionary<string, EntityProperty>(capacity: 16);
            properties[Columns.EventTimestamp] = new EntityProperty(entry.TimeStamp);
            properties[Columns.Logger] = new EntityProperty(entry.LoggerName);
            properties[Columns.Level] = new EntityProperty(entry.Level.Value);
            properties[Columns.LevelName] = new EntityProperty(entry.Level.Name);
            properties[Columns.Thread] = new EntityProperty(entry.ThreadName);
            properties[Columns.Message] = new EntityProperty(entry.RenderedMessage);
            properties[Columns.Exception] = new EntityProperty(entry.GetExceptionString());

            return new DynamicTableEntity(
                partitionKey: CreatePartitionKey(entry),
                rowKey: CreateRowKey(entry),
                etag: null,
                properties: properties
            );
        }

        /// <summary>
        /// Converts a logging event into a table entity and appends the
        /// Azure role / instance / deployment identification columns.
        /// </summary>
        public static DynamicTableEntity CreateTableEntity(LoggingEvent entry, string role, string roleInstance, string deploymentId)
        {
            DynamicTableEntity entity = CreateTableEntity(entry);
            entity[Columns.Role] = new EntityProperty(role);
            entity[Columns.RoleInstance] = new EntityProperty(roleInstance);
            entity[Columns.DeploymentId] = new EntityProperty(deploymentId);
            return entity;
        }
    }
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
using System; | |
using System.Collections.Generic; | |
using log4net.Appender; | |
using log4net.Core; | |
using Microsoft.WindowsAzure.ServiceRuntime; | |
using Microsoft.WindowsAzure.Storage; | |
using Microsoft.WindowsAzure.Storage.Table; | |
namespace WindowsAzure.Logging
{
    /// <summary>
    /// A buffering log4net appender that writes log events to an Azure Table
    /// Storage table in batches. Batches are executed asynchronously
    /// (fire-and-forget); individual operation failures are reported through
    /// the appender's <c>ErrorHandler</c>.
    /// </summary>
    public sealed class TableStorageAppender : BufferingAppenderSkeleton
    {
        /// <summary>Table Storage allows at most 100 operations per batch.</summary>
        public const int MaxLogsPerBatch = 100;
        public const string DefaultConnectionStringKey = "Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString";
        public const string DefaultTableName = "LogTable";

        // Role identity captured once at activation; only populated when
        // running inside the Azure role service environment.
        private static string s_roleName;
        private static string s_roleInstanceId;
        private static string s_deploymentId;
        private static bool s_isAzureRole = false;

        private CloudTable _table;

        /// <summary>Configuration setting name holding the storage connection string.</summary>
        public string ConnectionStringKey { get; set; }

        /// <summary>Name of the destination table; created on activation if missing.</summary>
        public string TableName { get; set; }

        /// <summary>Per-request server timeout; ignored unless positive.</summary>
        public TimeSpan ServerTimeout { get; set; }

        /// <summary>Maximum execution time across retries; ignored unless positive.</summary>
        public TimeSpan MaximumExecutionTime { get; set; }

        public TableStorageAppender()
        {
            ConnectionStringKey = DefaultConnectionStringKey;
            TableName = DefaultTableName;
        }

        /// <summary>
        /// Resolves the storage account, configures the table client and ensures
        /// the destination table exists. On failure the error is reported and the
        /// appender stays disabled (<see cref="_table"/> remains null).
        /// </summary>
        public override void ActivateOptions()
        {
            base.ActivateOptions();
            try
            {
                var account = GetStorageAccount();
                var client = account.CreateCloudTableClient();
                if (ServerTimeout > TimeSpan.Zero)
                    client.ServerTimeout = ServerTimeout;
                if (MaximumExecutionTime > TimeSpan.Zero)
                    client.MaximumExecutionTime = MaximumExecutionTime;
                _table = client.GetTableReference(TableName);
                _table.CreateIfNotExists();
            }
            catch (Exception ex)
            {
                ErrorHandler.Error("An exception occurred during appender activation", ex);
                // Skip role discovery; the appender is unusable without a table.
                return;
            }
            TrySetAzureRoleProperties();
        }

        /// <summary>
        /// Converts the buffered events to entities and inserts them in batches.
        /// </summary>
        protected override void SendBuffer(LoggingEvent[] events)
        {
            // _table is null when activation failed; events is defensive.
            if (_table == null || events == null || events.Length == 0)
                return;

            // A batch must include only entities with the same PartitionKey.
            // Because logging events are buffered and the partition key is the
            // event datetime with hour precision, a single buffer can span
            // multiple partition keys. Keep one open batch per PartitionKey.
            var batches = new Dictionary<string, TableBatchOperation>(StringComparer.Ordinal);
            foreach (var @event in events)
            {
                var entry = CreateLogEntry(@event);
                TableBatchOperation batch;
                if (!batches.TryGetValue(entry.PartitionKey, out batch))
                {
                    batch = new TableBatchOperation();
                    batches.Add(entry.PartitionKey, batch);
                }
                batch.Insert(entry);

                // A Table Storage batch holds at most 100 operations.
                // When a batch fills up, start executing it asynchronously and
                // drop it from the dictionary so a fresh batch can be opened
                // for the same partition key.
                if (batch.Count >= MaxLogsPerBatch)
                {
                    _table.BeginExecuteBatch(batch, ExecuteBatchAsyncCallback, null);
                    batches.Remove(entry.PartitionKey);
                }
            }

            // Execute all remaining (partial) batches asynchronously.
            foreach (var batch in batches.Values)
            {
                _table.BeginExecuteBatch(batch, ExecuteBatchAsyncCallback, null);
            }
        }

        /// <summary>
        /// Captures role name, instance id and deployment id when running under
        /// the Azure role service environment; returns false otherwise.
        /// </summary>
        private static bool TrySetAzureRoleProperties()
        {
            try
            {
                var instance = RoleEnvironment.CurrentRoleInstance;
                s_roleName = instance.Role.Name;
                s_roleInstanceId = instance.Id;
                s_deploymentId = RoleEnvironment.DeploymentId;
                s_isAzureRole = true;
            }
            catch (InvalidOperationException)
            {
                // When the process is not running under the Azure role service
                // environment, RoleEnvironment throws InvalidOperationException
                // ("role discovery data is unavailable"). Deliberately swallowed:
                // the appender simply omits the role columns.
            }
            return s_isAzureRole;
        }

        /// <summary>
        /// Builds the table entity for an event, adding role identification
        /// columns when running inside an Azure role.
        /// </summary>
        private static ITableEntity CreateLogEntry(LoggingEvent @event)
        {
            return s_isAzureRole
                ? LogTableEntity.CreateTableEntity(@event, s_roleName, s_roleInstanceId, s_deploymentId)
                : LogTableEntity.CreateTableEntity(@event);
        }

        /// <summary>
        /// Resolves the storage account from configuration, falling back to the
        /// development storage account when the setting is blank.
        /// </summary>
        private CloudStorageAccount GetStorageAccount()
        {
            var connectionString = RoleEnvironment.GetConfigurationSettingValue(ConnectionStringKey);
            if (string.IsNullOrWhiteSpace(connectionString))
                return CloudStorageAccount.DevelopmentStorageAccount;
            else
                return CloudStorageAccount.Parse(connectionString);
        }

        /// <summary>
        /// Completes an asynchronous batch insert and reports any individual
        /// operation errors through the appender's error handler.
        /// </summary>
        private void ExecuteBatchAsyncCallback(IAsyncResult asyncResult)
        {
            try
            {
                foreach (var result in _table.EndExecuteBatch(asyncResult))
                {
                    if (result.HttpStatusCode >= 400)
                    {
                        ErrorHandler.Error(
                            string.Format("Table Storage service returned status code {0} when inserting log entry with etag {1}",
                                result.HttpStatusCode, result.Etag));
                    }
                }
            }
            catch (Exception ex)
            {
                ErrorHandler.Error("Error inserting log entry batch to " + _table.Uri, ex);
            }
        }
    }
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment