I hereby claim:
- I am draffensperger on github.
- I am draffensperger (https://keybase.io/draffensperger) on keybase.
- I have a public key whose fingerprint is 11AD 4270 6B18 7B21 BDCF 7A62 66E0 7480 D06A 9FE6
To claim this, I am signing this object:
// Google Apps Script: reads calendar-query parameters from the "GCalParams"
// sheet (B1 = calendar id, B2 = start date, B3 = end date) and fetches the
// matching events from that Google Calendar.
// NOTE(review): the trailing "| |" markers look like table-extraction
// residue, not code; this fragment is also truncated — the rest of the
// function body and its closing brace are not visible in this chunk.
function getGCalData() { | |
var ss = SpreadsheetApp.getActiveSpreadsheet(); | |
var inSheet = ss.getSheetByName("GCalParams"); | |
var id = inSheet.getRange("B1").getValue(); | |
var startDate = inSheet.getRange("B2").getValue(); | |
var endDate = inSheet.getRange("B3").getValue(); | |
var cal = CalendarApp.getCalendarById(id); | |
// Fetch all events in [startDate, endDate) for the configured calendar.
var events = cal.getEvents(startDate, endDate); |
// Google Apps Script: appears to compute busy/free information for a
// comma-separated list of people over a spreadsheet range.
// startRow/startCol/endRow/endCol default to 2/2/40/9 (i.e. B2:I40) when
// omitted; note the "||" defaults would also replace an explicit 0.
// NOTE(review): fragment is truncated — the body continues beyond this
// chunk, so the full behavior cannot be confirmed from here.
function peopleBusy(people, startRow, startCol, endRow, endCol) { | |
startRow = startRow || 2; | |
startCol = startCol || 2; | |
endRow = endRow || 40; | |
endCol = endCol || 9; | |
// Split the single comma-separated string into an array of names.
people = people.split(","); | |
var ss = SpreadsheetApp.getActiveSpreadsheet(); | |
var n, i, j; |
' VBA module fragment: a private user-defined type describing a person-name
' lookup query — the individual name parts plus the worksheet Ranges that
' hold the candidate first/last/full names to search.
' NOTE(review): fragment is truncated — "End Type" and any following
' procedures are not visible in this chunk.
Option Explicit | |
Private Type PersonQuery | |
firstName As String | |
lastName As String | |
fullName As String | |
firstNames As Range | |
lastNames As Range | |
fullNames As Range | |
' Flag, presumably set when first/last name order was swapped — confirm
' against the code that populates this type.
namesSwapped As Boolean |
# Octopress-style Rakefile fragment: library requires plus rsync deploy
# configuration. The values below are placeholders to be customized.
require "rubygems" | |
require "bundler/setup" | |
require "stringex" | |
## -- Rsync Deploy config -- ##
# Be sure your public key is listed in your server's ~/.ssh/authorized_keys file
# SSH login target and port for the rsync deploy.
ssh_user = "user@domain.com"
ssh_port = "22"
# Remote directory the generated site is synced into.
document_root = "~/website.com/"
# When true, rsync would delete remote files that no longer exist locally;
# kept off here as the safer default.
rsync_delete = false
# Yields the path of every regular file (directories excluded) beneath the
# current working directory, in Dir.glob("**/*") order.
def each_file()
  Dir.glob("**/*").each do |path|
    next if File.directory?(path)
    yield path
  end
end
# Appears to build a filename variant embedding the Base64 MD5 digest of the
# file's contents (a cache-busting pattern).
# NOTE(review): fragment is truncated — the rest of the method and its "end"
# are not visible here; Digest::MD5 also needs `require "digest"` somewhere
# in the full file — confirm it exists.
def file_md5_name(file)
ext = File.extname file
digest = Digest::MD5.base64digest File.read file
I hereby claim:
To claim this, I am signing this object:
# Mixin for Sidekiq jobs: lets a job detect whether an equivalent job (same
# args) already exists, so the duplicate can be skipped.
# NOTE(review): fragment is truncated — job_in_retries? and the rest of
# older_job_running?, plus the module's "end", are not visible in this chunk.
module JobDuplicateChecker | |
# True when an equivalent job is waiting in the retry set or an older job
# with the same args is still running.
def duplicate_job?(*args) | |
job_in_retries?(args) || older_job_running?(args) | |
end | |
private | |
def older_job_running?(args) | |
# Scan the live Sidekiq worker list; identify this process's own job by
# matching the payload jid against our jid.
workers = Sidekiq::Workers.new | |
self_worker = workers.find { |_, _, work| work['payload']['jid'] == jid } |
In MPDX, the Google Contacts sync job takes a long time and the google accounts loop to sync each job could benefit from find_each(batch_size: 1)
. Basically it seems like find_each
pulls in the records in batches and then saves them in an array to enumerate through. Here's a comparison of the memory results with different batch_sizes
that I did using a similar, but contrived MemHungry
model. To setup, first do 6.times { MemHungry.create }
.
Using the default batch_size
of 1000 the memory at the end reflects all 6 objects and the RAM they hold onto:
[1] pry(main)> MemHungry.all.find_each { |m| puts m.id; m.eat_memory; }
MemHungry Load (0.5ms) SELECT "mem_hungries".* FROM "mem_hungries" ORDER BY "mem_hungries"."id" ASC LIMIT 1000
1
Memory before GC: 112.63671875
Memory before allocation: 159.90234375
Memory after allocation: 1759.90234375
/**
 * Google Apps Script add-on lifecycle hook, run once when the add-on is
 * installed: builds the menu immediately instead of waiting for the next
 * document open.
 * @param {Object} e - Apps Script install/open event object.
 */
function onInstall(e) {
  // Fix: forward the event object so onOpen receives the same auth-mode /
  // context information it gets on a normal open (the original dropped it).
  onOpen(e);
}
/**
 * Google Apps Script add-on lifecycle hook, run when the document opens:
 * registers a single "Recalculate schedule" menu item that invokes the
 * global function named "use".
 * @param {Object} e - Apps Script open event object (unused).
 */
function onOpen(e) {
  var menu = SpreadsheetApp.getUi().createAddonMenu();
  menu.addItem('Recalculate schedule', 'use');
  menu.addToUi();
}
workspace(name = "test_proto") |