HDFS stack trace - MultipleOutputs + TableMapper

The mapper and driver below run a map-only TableMapper job that writes through MultipleOutputs; the namenode log at the end shows the resulting AlreadyBeingCreatedException.

Mapper.java
import com.edmunds.lead.thrift.gen.Address;
import com.edmunds.lead.thrift.gen.Contract;
import com.edmunds.lead.thrift.gen.Customer;
import com.edmunds.lead.thrift.gen.Lead;
import com.edmunds.lead.thrift.gen.Option;
import com.edmunds.lead.thrift.gen.Phone;
import com.edmunds.lead.thrift.gen.Subscription;
import com.edmunds.lead.thrift.gen.Tracking;
import com.edmunds.lead.thrift.gen.Vehicle;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TCompactProtocol;
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.Map;
 
/**
 * Mapper for LeadProcessor.
 *
 * @author bneale
 */
public class LeadMapper extends TableMapper<Text, Text> {

    /**
     * Counters.
     */
    public enum Counters { ROWS, CANNOT_DESERIALIZE, EMPTY, INTERRUPTED }
 
    /**
     * Delimiter: the ASCII group separator, (char) 29.
     */
    public static final char DELIMITER = (char) 29;

    private TDeserializer deserializer;
    private MultipleOutputs<Text, Text> mos;
    private String timestamp;

    @Override
    public void setup(Context context) {
        deserializer = new TDeserializer(new TCompactProtocol.Factory());
        mos = new MultipleOutputs<Text, Text>(context);
        DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        timestamp = df.format(new Date());
    }
 
    public void setTimestamp(String timestamp) {
        this.timestamp = timestamp;
    }

    private String clean(String src) {
        if (src == null) {
            return src;
        }
        String result = src.replaceAll("\n", "");
        result = result.replaceAll("\r", "");
        return result;
    }

    protected String generateFields() {
        StringBuilder builder = new StringBuilder();
        return builder.append(timestamp).append(DELIMITER) // process_datetime
                .append("").append(DELIMITER)              // record_datetime
                .append("").append(DELIMITER)              // additional_data
                .append("").toString();                    // status_msg
    }
 
    // Flattens the lead (plus its contract, tracking and customer) into one
    // delimited record and writes it to the "lead" named output.
    protected void getLead(Lead lead) throws IOException,
            InterruptedException {

        if (lead == null) {
            return;
        }

        StringBuilder leadString = new StringBuilder();
        leadString.append(lead.getId()).append(DELIMITER)
                .append(lead.getOrderId()).append(DELIMITER)
                .append(lead.getSyndicationPartner()).append(DELIMITER)
                .append(lead.getStatusCode()).append(DELIMITER)
                .append(lead.getSubStatusCode()).append(DELIMITER)
                .append(lead.getMethodCode()).append(DELIMITER)
                .append(lead.programCode).append(DELIMITER)
                .append(lead.confirmed ? "Y" : "N").append(DELIMITER)
                .append(lead.getReservationKey()).append(DELIMITER)
                .append(lead.getCreateDate()).append(DELIMITER)
                .append(lead.getUpdateDate()).append(DELIMITER)
                .append(lead.getProcessTime()).append(DELIMITER)
                .append(lead.getComment()).append(DELIMITER)
                .append(getContract(lead.getContract()))
                .append(getTracking(lead.getTracking()))
                .append(getCustomer(lead.getCustomer()))
                .append(lead.getSubStatusCodeId()).append(DELIMITER)
                .append(lead.getStatusCodeId()).append(DELIMITER)
                .append(lead.getMethodCodeId()).append(DELIMITER)
                .append(String.valueOf(lead.getUpdateTimestamp())).append(DELIMITER)
                .append(generateFields());

        //context.write(new Text("lead"), new Text(clean(leadString.toString())));
        mos.write("lead", new Text("lead"), new Text(clean(leadString.toString())));
    }
 
    // To get Contract
    private String getContract(Contract contract)
            throws IOException {

        if (contract == null) {
            return "";
        }

        StringBuilder contractString = new StringBuilder();
        contractString.append(contract.getId()).append(DELIMITER)
                .append(contract.getCode()).append(DELIMITER)
                .append(contract.getApplicationType()).append(DELIMITER)
                .append(contract.getName()).append(DELIMITER)            // optional field
                .append(contract.getDisplayName()).append(DELIMITER)     // optional field
                .append(contract.getEffectiveDate()).append(DELIMITER)   // optional field
                .append(contract.getTerminationDate()).append(DELIMITER) // optional field
                .append(contract.active ? "Y" : "N").append(DELIMITER)   // optional field
                .append(contract.getReportType()).append(DELIMITER)
                .append(generateFields());

        return contractString.toString();
    }
 
    // To get Contract Attributes. Note: each attribute row is written twice,
    // once to the job's default output via context.write() and once to the
    // "leadcontractattributes" named output via mos.write().
    private void getContractAttributes(Lead lead, Context context)
            throws IOException, InterruptedException {

        if (lead.getContract().getAttributes() == null) {
            return;
        }

        Map<String, String> contractAttributesMap = lead.getContract().getAttributes();
        for (String key : contractAttributesMap.keySet()) {

            StringBuilder contractAttributesString = new StringBuilder();
            String contractAttributeKey = key;
            String contractAttributeValue = contractAttributesMap.get(key);

            contractAttributesString.append(lead.getId()).append(DELIMITER)
                    .append(lead.getContract().getId()).append(DELIMITER)
                    .append(contractAttributeKey).append(DELIMITER)
                    .append(contractAttributeValue).append(DELIMITER)
                    .append(generateFields());

            context.write(new Text("lead_contract_attributes"), new Text(clean(contractAttributesString.toString())));
            mos.write("leadcontractattributes", new Text("lead_contract_attributes"), new Text(clean(contractAttributesString.toString())));
        }
    }
 
    // To get Tracking
    private String getTracking(Tracking tracking)
            throws IOException {

        if (tracking == null) {
            return "";
        }

        StringBuilder trackingString = new StringBuilder();
        trackingString.append(tracking.getZipCode()).append(DELIMITER)
                .append(tracking.getCookieText()).append(DELIMITER)
                .append(tracking.getUserTrack()).append(DELIMITER)
                .append(tracking.getUserSession()).append(DELIMITER)
                .append(tracking.getClientIp()).append(DELIMITER)
                .append(tracking.getClientHost()).append(DELIMITER)
                .append(generateFields());

        return trackingString.toString();
    }
 
    // To get Customer
    private String getCustomer(Customer customer)
            throws IOException {

        if (customer == null) {
            return "";
        }

        StringBuilder customerString = new StringBuilder();
        customerString.append(customer.getType()).append(DELIMITER)
                .append(customer.getFirstName()).append(DELIMITER)
                .append(customer.getLastName()).append(DELIMITER)
                .append(customer.getEmail()).append(DELIMITER)
                .append(getPhone(customer.getPhone())).append(DELIMITER)
                .append(getAddress(customer.getAddress())).append(DELIMITER)
                .append(customer.getId()).append(DELIMITER)
                .append(generateFields());

        return customerString.toString();
    }
 
    // To get Phone object of customer
    private String getPhone(Phone phone) throws IOException {

        if (phone == null) {
            return "";
        }

        StringBuilder phoneString = new StringBuilder();
        phoneString.append(phone.getAreaCode()).append(DELIMITER)
                .append(phone.getExtension()).append(DELIMITER)
                .append(phone.getPrefix()).append(DELIMITER)
                .append(phone.getSuffix()).append(DELIMITER)
                .append(phone.temporary ? "Y" : "N").append(DELIMITER);

        return phoneString.toString();
    }
 
    // To get address of customer
    private String getAddress(Address address)
            throws IOException {

        if (address == null) {
            return "";
        }

        StringBuilder addressString = new StringBuilder();
        addressString.append(address.getStreet()).append(DELIMITER)
                .append(address.getApartment()).append(DELIMITER)
                .append(address.getCity()).append(DELIMITER)
                .append(address.getState()).append(DELIMITER)
                .append(address.getCountry()).append(DELIMITER)
                .append(address.getZip()).append(DELIMITER);

        return addressString.toString();
    }
 
    // To get attributes of customer
    private void getCustomerAttributes(Lead lead, Context context)
            throws IOException, InterruptedException {

        if (lead.getCustomer().getAttributes() == null) {
            return;
        }

        Map<String, String> customerAttributesMap = lead.getCustomer().getAttributes();

        for (String key : customerAttributesMap.keySet()) {
            StringBuilder customerAttributesString = new StringBuilder();
            String customerAttributeKey = key;
            String customerAttributeValue = customerAttributesMap.get(key);

            customerAttributesString.append(lead.getId()).append(DELIMITER)
                    .append(lead.getCustomer().getId()).append(DELIMITER)
                    .append(customerAttributeKey).append(DELIMITER)
                    .append(customerAttributeValue).append(DELIMITER)
                    .append(generateFields());

            context.write(new Text("lead_customer_attributes"), new Text(clean(customerAttributesString.toString())));
            mos.write("leadcustomerattributes", new Text("lead_customer_attributes"), new Text(clean(customerAttributesString.toString())));
        }
    }
 
    // To get vehicles
    private void getVehicles(Lead lead, Context context)
            throws IOException, InterruptedException {

        if (lead.getVehicles() == null) {
            return;
        }

        Map<String, Vehicle> vehicleMap = lead.getVehicles();

        for (String key : vehicleMap.keySet()) {
            StringBuilder vehiclesString = new StringBuilder();
            String vehicleKey = key;
            Vehicle vehicleValue = vehicleMap.get(key);
            String vin = vehicleValue.getVin();
            String type = vehicleValue.getType();
            String year = vehicleValue.getYear();
            String make = vehicleValue.getMake();
            String model = vehicleValue.getModel();
            String trim = vehicleValue.getTrim();
            String comment = vehicleValue.getComment();
            String classification = vehicleValue.getClassification();

            vehiclesString.append(lead.getId()).append(DELIMITER)
                    .append(vehicleKey).append(DELIMITER)
                    .append(vin).append(DELIMITER)
                    .append(type).append(DELIMITER)
                    .append(year).append(DELIMITER)
                    .append(make).append(DELIMITER)
                    .append(model).append(DELIMITER)
                    .append(trim).append(DELIMITER)
                    .append(comment).append(DELIMITER)
                    .append(classification).append(DELIMITER)
                    .append(generateFields());

            getVehicleOptions(vehicleValue, lead, context);
            getVehicleAttributes(vehicleValue, lead, context);
            getVehicleSearchParameters(vehicleValue, lead, context);
            context.write(new Text("lead_vehicles"), new Text(clean(vehiclesString.toString())));
            mos.write("leadvehicles", new Text("lead_vehicles"), new Text(clean(vehiclesString.toString())));
        }
    }
 
    // To get vehicle options
    private void getVehicleOptions(Vehicle vehicle, Lead lead, Context context)
            throws IOException, InterruptedException {

        if (vehicle.getOptions() == null) {
            return;
        }

        List<Option> options = vehicle.getOptions();

        for (Option option : options) {

            if (option == null) {
                continue;
            }

            StringBuilder optionString = new StringBuilder();
            optionString.append(lead.getId()).append(DELIMITER)
                    .append(option.getCode()).append(DELIMITER)
                    .append(option.getName()).append(DELIMITER)
                    .append(option.getTmvPrice()).append(DELIMITER)
                    .append(option.getMsrpPrice()).append(DELIMITER)
                    .append(option.getInvoicePrice()).append(DELIMITER)
                    .append(option.getId()).append(DELIMITER)
                    .append(generateFields());

            context.write(new Text("lead_vehicles_options"), new Text(clean(optionString.toString())));
            mos.write("leadvehiclesoptions", new Text("lead_vehicles_options"), new Text(clean(optionString.toString())));
        }
    }
 
    // To get vehicle attributes
    private void getVehicleAttributes(Vehicle vehicle, Lead lead, Context context)
            throws IOException, InterruptedException {

        if (vehicle.getAttributes() == null) {
            return;
        }

        Map<String, String> vehicleAttributesMap = vehicle.getAttributes();

        for (String key : vehicleAttributesMap.keySet()) {
            StringBuilder vehicleAttributesString = new StringBuilder();
            String vehicleAttributeKey = key;
            String vehicleAttributeValue = vehicleAttributesMap.get(key);

            vehicleAttributesString.append(lead.getId()).append(DELIMITER)
                    .append(vehicleAttributeKey).append(DELIMITER)
                    .append(vehicleAttributeValue).append(DELIMITER)
                    .append(generateFields());

            context.write(new Text("lead_vehicles_attributes"), new Text(clean(vehicleAttributesString.toString())));
            mos.write("leadvehiclesattributes", new Text("lead_vehicles_attributes"), new Text(clean(vehicleAttributesString.toString())));
        }
    }
 
    // To get vehicle search parameters
    private void getVehicleSearchParameters(Vehicle vehicle, Lead lead, Context context)
            throws IOException, InterruptedException {

        if (vehicle.getSearchParameters() == null) {
            return;
        }

        Map<String, String> vehicleSearchParametersMap = vehicle.getSearchParameters();

        for (String key : vehicleSearchParametersMap.keySet()) {
            StringBuilder vehicleSearchParametersString = new StringBuilder();
            String vehicleSearchParametersKey = key;
            String vehicleSearchParametersValue = vehicleSearchParametersMap.get(key);

            vehicleSearchParametersString.append(lead.getId()).append(DELIMITER)
                    .append(vehicleSearchParametersKey).append(DELIMITER)
                    .append(vehicleSearchParametersValue).append(DELIMITER)
                    .append(generateFields());

            context.write(new Text("lead_vehicles_searchparameters"), new Text(clean(vehicleSearchParametersString.toString())));
            mos.write("leadvehiclessearchparameters", new Text("lead_vehicles_searchparameters"), new Text(clean(vehicleSearchParametersString.toString())));
        }
    }
 
    // To get subscriptions
    private void getSubscriptions(Lead lead, Context context)
            throws IOException, InterruptedException {

        if (lead.getCustomer().getSubscriptions() == null) {
            return;
        }

        List<Subscription> subscriptions = lead.getCustomer().getSubscriptions();

        for (Subscription subscription : subscriptions) {

            if (subscription == null) {
                continue;
            }

            StringBuilder subscriptionString = new StringBuilder();
            subscriptionString.append(lead.getId()).append(DELIMITER)
                    .append(lead.getCustomer().getId()).append(DELIMITER)
                    .append(subscription.getProduct()).append(DELIMITER)
                    .append(subscription.getDeliveryFormat()).append(DELIMITER)
                    .append(subscription.subscribed ? "Y" : "N").append(DELIMITER)
                    .append(generateFields());

            context.write(new Text("lead_customer_subscriptions"), new Text(clean(subscriptionString.toString())));
            mos.write("leadcustomersubscriptions", new Text("lead_customer_subscriptions"), new Text(clean(subscriptionString.toString())));
        }
    }
 
    // To get permissions
    private void getPermissions(Lead lead, Context context)
            throws IOException, InterruptedException {

        if (lead.getCustomer().getPermissions() == null) {
            return;
        }

        List<com.edmunds.lead.thrift.gen.Permission> permissions = lead.getCustomer().getPermissions();

        for (com.edmunds.lead.thrift.gen.Permission permission : permissions) {
            if (permission == null) {
                continue;
            }
            StringBuilder permissionsString = new StringBuilder();
            permissionsString.append(lead.getId()).append(DELIMITER)
                    .append(lead.getCustomer().getId()).append(DELIMITER)
                    .append(permission.getName()).append(DELIMITER)
                    .append(permission.granted ? "Y" : "N").append(DELIMITER)
                    .append(generateFields());

            context.write(new Text("lead_customer_permissions"), new Text(clean(permissionsString.toString())));
            mos.write("leadcustomerpermissions", new Text("lead_customer_permissions"), new Text(clean(permissionsString.toString())));
        }
    }
 
    // To get lead attributes
    private void getLeadAttributes(Lead lead, Context context)
            throws IOException, InterruptedException {

        if (lead.getAttributes() == null) {
            return;
        }

        Map<String, String> leadAttributesMap = lead.getAttributes();
        for (String key : leadAttributesMap.keySet()) {
            StringBuilder leadAttributesString = new StringBuilder();
            String leadAttributeKey = key;
            String leadAttributeValue = leadAttributesMap.get(key);

            leadAttributesString.append(lead.getId()).append(DELIMITER)
                    .append(leadAttributeKey).append(DELIMITER)
                    .append(leadAttributeValue).append(DELIMITER)
                    .append(generateFields());

            context.write(new Text("lead_attributes"), new Text(clean(leadAttributesString.toString())));
            mos.write("leadattributes", new Text("lead_attributes"), new Text(clean(leadAttributesString.toString())));
        }
    }
 
    @Override
    public void map(ImmutableBytesWritable row, Result values, Context context)
            throws IOException, InterruptedException {

        // NOTE: the framework already invokes setup() once per task attempt;
        // calling it here re-creates the MultipleOutputs instance for every
        // row, so each record triggers a fresh attempt to create the same
        // named-output file (see the AlreadyBeingCreatedException in the
        // namenode log below).
        setup(context);

        byte[] leadBytes = values.getValue(Bytes.toBytes("binary"), Bytes.toBytes("object"));

        Lead lead = new Lead();
        if (leadBytes.length > 0) {
            try {
                deserializer.deserialize(lead, leadBytes);

                // get lead
                //getLead(lead, context);
                getLead(lead);

                /*
                // get contract attributes
                getContractAttributes(lead, context);

                // get customer attributes
                getCustomerAttributes(lead, context);

                // get subscriptions
                getSubscriptions(lead, context);

                // get permissions
                getPermissions(lead, context);

                // get vehicles
                getVehicles(lead, context);

                // get lead attributes
                getLeadAttributes(lead, context);
                */
            } catch (TException e) {
                context.getCounter(Counters.CANNOT_DESERIALIZE).increment(1);
            } catch (InterruptedException e) {
                context.getCounter(Counters.INTERRUPTED).increment(1);
            }
            context.getCounter(Counters.ROWS).increment(1);
        } else {
            context.getCounter(Counters.EMPTY).increment(1);
        }
    }
 
    @Override
    public void cleanup(Context context) throws IOException, InterruptedException {
        mos.close();
    }

}
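
For reference, a quick standalone sketch (not part of the gist) of the record format the mapper emits: fields joined by the ASCII group separator, one record per line once clean() strips newlines. The field values here are made up for illustration.

// Illustrates the flattening convention used above: fields joined by
// (char) 29, the ASCII group separator. Values are illustrative only.
public final class DelimiterDemo {

    private static final char DELIMITER = (char) 29;

    public static void main(String[] args) {
        String record = new StringBuilder()
                .append("12345").append(DELIMITER)   // lead id (made up)
                .append("ORDER-1").append(DELIMITER) // order id (made up)
                .append("Y")                         // confirmed flag
                .toString();

        // A downstream consumer splits on the group separator to recover
        // the fields.
        String[] fields = record.split(String.valueOf(DELIMITER));
        System.out.println(fields.length); // prints 3
    }
}
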
Processor.java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.GenericOptionsParser;
 
import java.io.IOException;
 
 
/**
 * Driver for LeadProcessor Mapper.
 *
 * @author bneale
 */
public final class LeadProcessor {

    static final String TABLE_NAME = "lead";

    private LeadProcessor() {
        // do nothing
    }
 
    public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException {
        Job job = new Job(conf, TABLE_NAME);
        job.setJarByClass(LeadTableMap.class);

        Scan scan = new Scan();
        //long start = Long.parseLong(args[0]);
        //long stop = Long.parseLong(args[1]);
        //scan.setTimeRange(start, stop);
        scan.setMaxVersions();

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        job.setMapperClass(LeadMapper.class);
        job.setNumReduceTasks(0);

        job.setOutputFormatClass(TextOutputFormat.class);
        //FileOutputFormat.setOutputPath(job, new Path(args[2]));
        FileOutputFormat.setOutputPath(job, new Path(args[0]));

        MultipleOutputs.addNamedOutput(job, "lead", TextOutputFormat.class, Text.class, Text.class);
        MultipleOutputs.addNamedOutput(job, "leadcontractattributes", TextOutputFormat.class, Text.class, Text.class);
        MultipleOutputs.addNamedOutput(job, "leadcustomerattributes", TextOutputFormat.class, Text.class, Text.class);
        MultipleOutputs.addNamedOutput(job, "leadcustomersubscriptions", TextOutputFormat.class, Text.class, Text.class);
        MultipleOutputs.addNamedOutput(job, "leadcustomerpermissions", TextOutputFormat.class, Text.class, Text.class);
        MultipleOutputs.addNamedOutput(job, "leadvehicles", TextOutputFormat.class, Text.class, Text.class);
        MultipleOutputs.addNamedOutput(job, "leadattributes", TextOutputFormat.class, Text.class, Text.class);

        MultipleOutputs.addNamedOutput(job, "leadvehiclesoptions", TextOutputFormat.class, Text.class, Text.class);
        MultipleOutputs.addNamedOutput(job, "leadvehiclesattributes", TextOutputFormat.class, Text.class, Text.class);
        MultipleOutputs.addNamedOutput(job, "leadvehiclessearchparameters", TextOutputFormat.class, Text.class, Text.class);

        // NOTE: initTableMapperJob() sets the job's mapper class itself, so
        // this call replaces the LeadMapper registered above with LeadTableMap
        // (see the sketch after this file).
        TableMapReduceUtil.initTableMapperJob(TABLE_NAME, scan, LeadTableMap.class, ImmutableBytesWritable.class, Result.class, job);

        return job;
    }
 
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        /*
        if (otherArgs.length != 3) {
            System.err.println("Usage: bin/hadoop jar dwh-hbase-processor-<version>-deploy.zip com.edmunds.dwh.lead.LeadProcessor <startdate> <enddate> <outputdirectory>");
            System.exit(1);
        }
        */
        Job job = createSubmittableJob(conf, otherArgs);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
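
One wiring detail worth flagging: TableMapReduceUtil.initTableMapperJob() calls job.setMapperClass() internally, so the driver above ends up running LeadTableMap rather than the LeadMapper it registered earlier. A minimal sketch of the safer ordering, under the assumption that LeadMapper is the intended mapper (MapperWiringSketch and wireTableMapper are hypothetical names, not part of the gist):

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

final class MapperWiringSketch {

    // Hypothetical helper: wires the table mapper so that the class passed
    // to initTableMapperJob() is the one that actually runs.
    static void wireTableMapper(Job job, String table) throws IOException {
        Scan scan = new Scan();
        scan.setMaxVersions();

        // initTableMapperJob() sets the mapper class itself, so name the real
        // mapper here; any further job overrides come after this call.
        TableMapReduceUtil.initTableMapperJob(
                table, scan, LeadMapper.class,
                ImmutableBytesWritable.class, Result.class, job);
        job.setNumReduceTasks(0);
    }
}
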
gistfile1.txt (namenode log from the failing run)
====
 
2011-06-14 16:44:51,113 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=hbase ip=/10.128.170.126 cmd=listStatus src=/hbase/.oldlogs dst=null perm=null
2011-06-14 16:45:24,013 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Number of transactions: 908 Total time for transactions(ms): 8Number of transactions batched in Syncs: 196 Number of syncs: 491 SyncTimes(ms): 84 87 65
2011-06-14 16:45:24,014 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=mkdirs src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751 dst=null perm=tchan:supergroup:rwxr-xr-x
2011-06-14 16:45:24,015 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=setPermission src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751 dst=null perm=tchan:supergroup:rwx------
2011-06-14 16:45:24,017 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=mkdirs src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars dst=null perm=tchan:supergroup:rwxr-xr-x
2011-06-14 16:45:24,019 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=setPermission src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars dst=null perm=tchan:supergroup:rwx------
2011-06-14 16:45:24,035 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=create src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/zookeeper-3.3.1.jar dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:24,053 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.allocateBlock: /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/zookeeper-3.3.1.jar. blk_-9098653819277089874_271226
2011-06-14 16:45:24,157 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.127:50010 is added to blk_-9098653819277089874_271226 size 596183
2011-06-14 16:45:24,158 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.128:50010 is added to blk_-9098653819277089874_271226 size 596183
2011-06-14 16:45:24,159 INFO org.apache.hadoop.hdfs.StateChange: Removing lease on file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/zookeeper-3.3.1.jar from client DFSClient_-1576401865
2011-06-14 16:45:24,159 INFO org.apache.hadoop.hdfs.StateChange: DIR* NameSystem.completeFile: file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/zookeeper-3.3.1.jar is closed by DFSClient_-1576401865
2011-06-14 16:45:24,161 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Increasing replication for file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/zookeeper-3.3.1.jar. New replication is 10
2011-06-14 16:45:24,161 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=setReplication src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/zookeeper-3.3.1.jar dst=null perm=null
2011-06-14 16:45:24,168 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=create src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hadoop-core-0.20.2+737.jar dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:24,179 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.allocateBlock: /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hadoop-core-0.20.2+737.jar. blk_-3349667616300216761_271227
2011-06-14 16:45:24,315 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.128:50010 is added to blk_-3349667616300216761_271227 size 3376361
2011-06-14 16:45:24,315 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.127:50010 is added to blk_-3349667616300216761_271227 size 3376361
2011-06-14 16:45:24,316 INFO org.apache.hadoop.hdfs.StateChange: Removing lease on file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hadoop-core-0.20.2+737.jar from client DFSClient_-1576401865
2011-06-14 16:45:24,317 INFO org.apache.hadoop.hdfs.StateChange: DIR* NameSystem.completeFile: file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hadoop-core-0.20.2+737.jar is closed by DFSClient_-1576401865
2011-06-14 16:45:24,318 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Increasing replication for file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hadoop-core-0.20.2+737.jar. New replication is 10
2011-06-14 16:45:24,318 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=setReplication src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hadoop-core-0.20.2+737.jar dst=null perm=null
2011-06-14 16:45:24,324 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=create src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/guava-r05.jar dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:24,326 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.allocateBlock: /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/guava-r05.jar. blk_8010350731178143223_271228
2011-06-14 16:45:24,362 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.127:50010 is added to blk_8010350731178143223_271228 size 934783
2011-06-14 16:45:24,362 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.128:50010 is added to blk_8010350731178143223_271228 size 934783
2011-06-14 16:45:24,363 INFO org.apache.hadoop.hdfs.StateChange: Removing lease on file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/guava-r05.jar from client DFSClient_-1576401865
2011-06-14 16:45:24,363 INFO org.apache.hadoop.hdfs.StateChange: DIR* NameSystem.completeFile: file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/guava-r05.jar is closed by DFSClient_-1576401865
2011-06-14 16:45:24,365 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Increasing replication for file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/guava-r05.jar. New replication is 10
2011-06-14 16:45:24,365 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=setReplication src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/guava-r05.jar dst=null perm=null
2011-06-14 16:45:24,372 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=create src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hbase-0.20.0.jar dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:24,374 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.allocateBlock: /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hbase-0.20.0.jar. blk_-1535982081980172384_271229
2011-06-14 16:45:24,441 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.127:50010 is added to blk_-1535982081980172384_271229 size 2058292
2011-06-14 16:45:24,441 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.128:50010 is added to blk_-1535982081980172384_271229 size 2058292
2011-06-14 16:45:24,442 INFO org.apache.hadoop.hdfs.StateChange: Removing lease on file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hbase-0.20.0.jar from client DFSClient_-1576401865
2011-06-14 16:45:24,442 INFO org.apache.hadoop.hdfs.StateChange: DIR* NameSystem.completeFile: file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hbase-0.20.0.jar is closed by DFSClient_-1576401865
2011-06-14 16:45:24,444 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Increasing replication for file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hbase-0.20.0.jar. New replication is 10
2011-06-14 16:45:24,444 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=setReplication src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hbase-0.20.0.jar dst=null perm=null
2011-06-14 16:45:24,480 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=create src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.jar dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:24,482 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.allocateBlock: /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.jar. blk_1491513275283502681_271230
2011-06-14 16:45:24,607 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.127:50010 is added to blk_1491513275283502681_271230 size 4709616
2011-06-14 16:45:24,607 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.128:50010 is added to blk_1491513275283502681_271230 size 4709616
2011-06-14 16:45:24,608 INFO org.apache.hadoop.hdfs.StateChange: Removing lease on file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.jar from client DFSClient_-1576401865
2011-06-14 16:45:24,609 INFO org.apache.hadoop.hdfs.StateChange: DIR* NameSystem.completeFile: file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.jar is closed by DFSClient_-1576401865
2011-06-14 16:45:24,610 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Increasing replication for file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.jar. New replication is 10
2011-06-14 16:45:24,610 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=setReplication src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.jar dst=null perm=null
2011-06-14 16:45:24,612 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=setPermission src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.jar dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:28,245 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=create src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:28,246 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=setPermission src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:28,247 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Increasing replication for file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split. New replication is 10
2011-06-14 16:45:28,248 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=setReplication src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=null
2011-06-14 16:45:28,259 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.allocateBlock: /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split. blk_-4086022364936456815_271231
2011-06-14 16:45:28,269 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.127:50010 is added to blk_-4086022364936456815_271231 size 20391
2011-06-14 16:45:28,270 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.128:50010 is added to blk_-4086022364936456815_271231 size 20391
2011-06-14 16:45:28,272 INFO org.apache.hadoop.hdfs.StateChange: Removing lease on file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split from client DFSClient_-1576401865
2011-06-14 16:45:28,272 INFO org.apache.hadoop.hdfs.StateChange: DIR* NameSystem.completeFile: file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split is closed by DFSClient_-1576401865
2011-06-14 16:45:28,275 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=create src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.splitmetainfo dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:28,276 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=setPermission src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.splitmetainfo dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:28,282 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.allocateBlock: /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.splitmetainfo. blk_-4303302880665635121_271232
2011-06-14 16:45:28,290 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.127:50010 is added to blk_-4303302880665635121_271232 size 7278
2011-06-14 16:45:28,291 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.128:50010 is added to blk_-4303302880665635121_271232 size 7278
2011-06-14 16:45:28,292 INFO org.apache.hadoop.hdfs.StateChange: Removing lease on file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.splitmetainfo from client DFSClient_-1576401865
2011-06-14 16:45:28,292 INFO org.apache.hadoop.hdfs.StateChange: DIR* NameSystem.completeFile: file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.splitmetainfo is closed by DFSClient_-1576401865
2011-06-14 16:45:28,302 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=create src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.xml dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:28,304 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.32.35.188 cmd=setPermission src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.xml dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:28,370 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.allocateBlock: /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.xml. blk_6216884281917722598_271233
2011-06-14 16:45:28,382 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.127:50010 is added to blk_6216884281917722598_271233 size 43977
2011-06-14 16:45:28,382 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.128:50010 is added to blk_6216884281917722598_271233 size 43977
2011-06-14 16:45:28,383 INFO org.apache.hadoop.hdfs.StateChange: Removing lease on file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.xml from client DFSClient_-1576401865
2011-06-14 16:45:28,384 INFO org.apache.hadoop.hdfs.StateChange: DIR* NameSystem.completeFile: file /usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.xml is closed by DFSClient_-1576401865
2011-06-14 16:45:28,388 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.126 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.xml dst=null perm=null
2011-06-14 16:45:28,420 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=mapred ip=/10.128.170.126 cmd=create src=/mapred/system/job_201105241707_7751/jobToken dst=null perm=mapred:supergroup:rw-r--r--
2011-06-14 16:45:28,421 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.allocateBlock: /mapred/system/job_201105241707_7751/jobToken. blk_194440756771642569_271234
2011-06-14 16:45:28,427 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.128:50010 is added to blk_194440756771642569_271234 size 106
2011-06-14 16:45:28,427 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.addStoredBlock: blockMap updated: 10.128.170.127:50010 is added to blk_194440756771642569_271234 size 106
2011-06-14 16:45:28,428 INFO org.apache.hadoop.hdfs.StateChange: Removing lease on file /mapred/system/job_201105241707_7751/jobToken from client DFSClient_-103897045
2011-06-14 16:45:28,428 INFO org.apache.hadoop.hdfs.StateChange: DIR* NameSystem.completeFile: file /mapred/system/job_201105241707_7751/jobToken is closed by DFSClient_-103897045
2011-06-14 16:45:28,429 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.126 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.splitmetainfo dst=null perm=null
2011-06-14 16:45:29,335 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=mapred ip=/10.128.170.127 cmd=open src=/mapred/system/job_201105241707_7751/jobToken dst=null perm=null
2011-06-14 16:45:29,345 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.127 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.xml dst=null perm=null
2011-06-14 16:45:29,364 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.127 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.jar dst=null perm=null
2011-06-14 16:45:29,495 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.127 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/zookeeper-3.3.1.jar dst=null perm=null
2011-06-14 16:45:29,527 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.127 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hadoop-core-0.20.2+737.jar dst=null perm=null
2011-06-14 16:45:29,591 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.127 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/guava-r05.jar dst=null perm=null
2011-06-14 16:45:29,630 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.127 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hbase-0.20.0.jar dst=null perm=null
2011-06-14 16:45:30,780 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.127 cmd=mkdirs src=/tmp/junk2/_temporary dst=null perm=tchan:supergroup:rwxr-xr-x
2011-06-14 16:45:32,347 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=mapred ip=/10.128.170.128 cmd=open src=/mapred/system/job_201105241707_7751/jobToken dst=null perm=null
2011-06-14 16:45:32,353 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.xml dst=null perm=null
2011-06-14 16:45:32,374 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.jar dst=null perm=null
2011-06-14 16:45:32,506 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/zookeeper-3.3.1.jar dst=null perm=null
2011-06-14 16:45:32,584 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hadoop-core-0.20.2+737.jar dst=null perm=null
2011-06-14 16:45:32,655 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/guava-r05.jar dst=null perm=null
2011-06-14 16:45:32,696 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/libjars/hbase-0.20.0.jar dst=null perm=null
2011-06-14 16:45:34,956 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=null
2011-06-14 16:45:35,153 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000016_0/part-m-00016 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,188 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=null
2011-06-14 16:45:35,276 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000013_0/part-m-00013 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,373 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000016_0/lead-m-00016 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,379 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000016_0/leadcustomerattributes-m-00016 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,392 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000016_0/leadvehiclesattributes-m-00016 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,427 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000016_0/leadvehicles-m-00016 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,432 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000013_0/lead-m-00013 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,439 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000013_0/leadcustomerattributes-m-00013 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,447 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000016_0/leadattributes-m-00016 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,478 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000013_0/leadvehiclesattributes-m-00013 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,502 WARN org.apache.hadoop.hdfs.StateChange: DIR* NameSystem.startFile: failed to create file /tmp/junk2/_temporary/_attempt_201105241707_7751_m_000016_0/lead-m-00016 for DFSClient_attempt_201105241707_7751_m_000016_0 on client 10.128.170.128 because current leaseholder is trying to recreate file.
2011-06-14 16:45:35,502 INFO org.apache.hadoop.ipc.Server: IPC Server handler 6 on 8020, call create(/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000016_0/lead-m-00016, rwxr-xr-x, DFSClient_attempt_201105241707_7751_m_000016_0, false, 3, 67108864) from 10.128.170.128:45164: error: org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException: failed to create file /tmp/junk2/_temporary/_attempt_201105241707_7751_m_000016_0/lead-m-00016 for DFSClient_attempt_201105241707_7751_m_000016_0 on client 10.128.170.128 because current leaseholder is trying to recreate file.
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException: failed to create file /tmp/junk2/_temporary/_attempt_201105241707_7751_m_000016_0/lead-m-00016 for DFSClient_attempt_201105241707_7751_m_000016_0 on client 10.128.170.128 because current leaseholder is trying to recreate file.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:1169)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:1103)
at org.apache.hadoop.hdfs.server.namenode.NameNode.create(NameNode.java:526)
at sun.reflect.GeneratedMethodAccessor9.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:528)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1319)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1315)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1063)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1313)
2011-06-14 16:45:35,514 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000013_0/leadvehicles-m-00013 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,519 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000013_0/leadattributes-m-00013 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,541 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=null
2011-06-14 16:45:35,546 WARN org.apache.hadoop.hdfs.StateChange: DIR* NameSystem.startFile: failed to create file /tmp/junk2/_temporary/_attempt_201105241707_7751_m_000013_0/lead-m-00013 for DFSClient_attempt_201105241707_7751_m_000013_0 on client 10.128.170.128 because current leaseholder is trying to recreate file.
2011-06-14 16:45:35,546 INFO org.apache.hadoop.ipc.Server: IPC Server handler 2 on 8020, call create(/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000013_0/lead-m-00013, rwxr-xr-x, DFSClient_attempt_201105241707_7751_m_000013_0, false, 3, 67108864) from 10.128.170.128:45165: error: org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException: failed to create file /tmp/junk2/_temporary/_attempt_201105241707_7751_m_000013_0/lead-m-00013 for DFSClient_attempt_201105241707_7751_m_000013_0 on client 10.128.170.128 because current leaseholder is trying to recreate file.
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException: failed to create file /tmp/junk2/_temporary/_attempt_201105241707_7751_m_000013_0/lead-m-00013 for DFSClient_attempt_201105241707_7751_m_000013_0 on client 10.128.170.128 because current leaseholder is trying to recreate file.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:1169)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:1103)
at org.apache.hadoop.hdfs.server.namenode.NameNode.create(NameNode.java:526)
at sun.reflect.GeneratedMethodAccessor9.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:528)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1319)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1315)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1063)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1313)
2011-06-14 16:45:35,560 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=null
2011-06-14 16:45:35,582 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=null
2011-06-14 16:45:35,598 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=null
2011-06-14 16:45:35,619 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=null
2011-06-14 16:45:35,626 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000010_0/part-m-00010 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,634 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=null
2011-06-14 16:45:35,656 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=null
2011-06-14 16:45:35,670 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000019_0/part-m-00019 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,672 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000012_0/part-m-00012 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,701 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000010_0/lead-m-00010 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,705 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000010_0/leadcustomerattributes-m-00010 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,707 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=open src=/usr/hadoop/tmp/mapred/staging/tchan/.staging/job_201105241707_7751/job.split dst=null perm=null
2011-06-14 16:45:35,708 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000017_0/part-m-00017 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,713 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000010_0/leadvehiclesoptions-m-00010 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,716 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000018_0/part-m-00018 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,721 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000010_0/leadvehiclesattributes-m-00010 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,726 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000019_0/lead-m-00019 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,730 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000019_0/leadcustomerattributes-m-00019 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,738 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000019_0/leadvehiclesattributes-m-00019 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,742 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000010_0/leadvehicles-m-00010 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,745 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000010_0/leadattributes-m-00010 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,747 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000015_0/part-m-00015 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,751 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000012_0/lead-m-00012 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,757 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000012_0/leadcustomerattributes-m-00012 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,759 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit: ugi=tchan ip=/10.128.170.128 cmd=create src=/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000019_0/leadvehicles-m-00019 dst=null perm=tchan:supergroup:rw-r--r--
2011-06-14 16:45:35,764 WARN org.apache.hadoop.hdfs.StateChange: DIR* NameSystem.startFile: failed to create file /tmp/junk2/_temporary/_attempt_201105241707_7751_m_000010_0/lead-m-00010 for DFSClient_attempt_201105241707_7751_m_000010_0 on client 10.128.170.128 because current leaseholder is trying to recreate file.
2011-06-14 16:45:35,764 INFO org.apache.hadoop.ipc.Server: IPC Server handler 8 on 8020, call create(/tmp/junk2/_temporary/_attempt_201105241707_7751_m_000010_0/lead-m-00010, rwxr-xr-x, DFSClient_attempt_201105241707_7751_m_000010_0, false, 3, 67108864) from 10.128.170.128:45172: error: org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException: failed to create file /tmp/junk2/_temporary/_attempt_201105241707_7751_m_000010_0/lead-m-00010 for DFSClient_attempt_201105241707_7751_m_000010_0 on client 10.128.170.128 because current leaseholder is trying to recreate file.
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException: failed to create file /tmp/junk2/_temporary/_attempt_201105241707_7751_m_000010_0/lead-m-00010 for DFSClient_attempt_201105241707_7751_m_000010_0 on client 10.128.170.128 because current leaseholder is trying to recreate file.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:1169)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:1103)
at org.apache.hadoop.hdfs.server.namenode.NameNode.create(NameNode.java:526)
at sun.reflect.GeneratedMethodAccessor9.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:528)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1319)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1315)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1063)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1313)
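
A plausible reading of the failure, given the code above: map() calls setup() on every row, so a new MultipleOutputs is constructed per record. Each instance lazily creates its named-output file on first write, so the second record's write to "lead" tries to create lead-m-000NN again from the same task attempt, and the namenode rejects the repeat create from the same lease holder with AlreadyBeingCreatedException. A minimal sketch of the once-per-attempt lifecycle (SkeletonMapper is a hypothetical stand-in, not the posted code):

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;

public class SkeletonMapper extends TableMapper<Text, Text> {

    private MultipleOutputs<Text, Text> mos;

    @Override
    protected void setup(Context context) {
        // Created once per task attempt; the framework calls setup() itself,
        // so map() never re-runs this.
        mos = new MultipleOutputs<Text, Text>(context);
    }

    @Override
    protected void map(ImmutableBytesWritable row, Result values, Context context)
            throws IOException, InterruptedException {
        // Reuse the single instance; assumes the driver registered a "lead"
        // named output, as the Processor.java above does.
        mos.write("lead", new Text("lead"), new Text("..."));
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        mos.close(); // flush and close all named outputs once, at task end
    }
}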
