hadoop-mapreduce-commits mailing list archives

From: w...@apache.org
Subject: svn commit: r1532967 [2/4] - in /hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project: ./ bin/ conf/ dev-support/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ hadoop-mapreduce-client/hadoop-mapreduce-...
Date: Thu, 17 Oct 2013 05:33:02 GMT
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java Thu Oct 17 05:32:42 2013
@@ -136,10 +136,21 @@ public class MRApp extends MRAppMaster {
   }
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
+      boolean cleanOnStart, Clock clock, boolean unregistered) {
+    this(maps, reduces, autoComplete, testName, cleanOnStart, 1, clock,
+        unregistered);
+  }
+
+  public MRApp(int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart) {
     this(maps, reduces, autoComplete, testName, cleanOnStart, 1);
   }
-  
+
+  public MRApp(int maps, int reduces, boolean autoComplete, String testName,
+      boolean cleanOnStart, boolean unregistered) {
+    this(maps, reduces, autoComplete, testName, cleanOnStart, 1, unregistered);
+  }
+
   @Override
   protected void initJobCredentialsAndUGI(Configuration conf) {
     // Fake a shuffle secret that normally is provided by the job client.
@@ -170,22 +181,42 @@ public class MRApp extends MRAppMaster {
   }
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
+      boolean cleanOnStart, int startCount, boolean unregistered) {
+    this(maps, reduces, autoComplete, testName, cleanOnStart, startCount,
+        new SystemClock(), unregistered);
+  }
+
+  public MRApp(int maps, int reduces, boolean autoComplete, String testName,
+      boolean cleanOnStart, int startCount, Clock clock, boolean unregistered) {
+    this(getApplicationAttemptId(applicationId, startCount), getContainerId(
+      applicationId, startCount), maps, reduces, autoComplete, testName,
+      cleanOnStart, startCount, clock, unregistered);
+  }
+
+  public MRApp(int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart, int startCount, Clock clock) {
     this(getApplicationAttemptId(applicationId, startCount), getContainerId(
       applicationId, startCount), maps, reduces, autoComplete, testName,
-      cleanOnStart, startCount, clock);
+      cleanOnStart, startCount, clock, true);
+  }
+
+  public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
+      int maps, int reduces, boolean autoComplete, String testName,
+      boolean cleanOnStart, int startCount, boolean unregistered) {
+    this(appAttemptId, amContainerId, maps, reduces, autoComplete, testName,
+        cleanOnStart, startCount, new SystemClock(), unregistered);
   }
 
   public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
       int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart, int startCount) {
     this(appAttemptId, amContainerId, maps, reduces, autoComplete, testName,
-        cleanOnStart, startCount, new SystemClock());
+        cleanOnStart, startCount, new SystemClock(), true);
   }
 
   public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
       int maps, int reduces, boolean autoComplete, String testName,
-      boolean cleanOnStart, int startCount, Clock clock) {
+      boolean cleanOnStart, int startCount, Clock clock, boolean unregistered) {
     super(appAttemptId, amContainerId, NM_HOST, NM_PORT, NM_HTTP_PORT, clock, System
         .currentTimeMillis(), MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
     this.testWorkDir = new File("target", testName);
@@ -204,6 +235,9 @@ public class MRApp extends MRAppMaster {
     this.maps = maps;
     this.reduces = reduces;
     this.autoComplete = autoComplete;
+    // If unregistered is true, successfullyUnregistered is set so tests can
+    // verify whether the job reaches its final state when MRAppMaster shuts down.
+    this.successfullyUnregistered.set(unregistered);
   }
 
   @Override
@@ -229,16 +263,22 @@ public class MRApp extends MRAppMaster {
   }
 
   public Job submit(Configuration conf) throws Exception {
+    //TODO: fix the bug where the speculator gets events with 
+    //not-fully-constructed objects. For now, disable speculative exec
+    return submit(conf, false, false);
+  }
+
+  public Job submit(Configuration conf, boolean mapSpeculative,
+      boolean reduceSpeculative) throws Exception {
     String user = conf.get(MRJobConfig.USER_NAME, UserGroupInformation
-      .getCurrentUser().getShortUserName());
+        .getCurrentUser().getShortUserName());
     conf.set(MRJobConfig.USER_NAME, user);
     conf.set(MRJobConfig.MR_AM_STAGING_DIR, testAbsPath.toString());
     conf.setBoolean(MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR, true);
-    //TODO: fix the bug where the speculator gets events with 
-    //not-fully-constructed objects. For now, disable speculative exec
-    LOG.info("****DISABLING SPECULATIVE EXECUTION*****");
-    conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false);
-    conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);
+    // TODO: fix the bug where the speculator gets events with
+    // not-fully-constructed objects. For now, disable speculative exec
+    conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, mapSpeculative);
+    conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, reduceSpeculative);
 
     init(conf);
     start();
@@ -247,7 +287,7 @@ public class MRApp extends MRAppMaster {
 
     // Write job.xml
     String jobFile = MRApps.getJobFile(conf, user,
-      TypeConverter.fromYarn(job.getID()));
+        TypeConverter.fromYarn(job.getID()));
     LOG.info("Writing job conf to " + jobFile);
     new File(jobFile).getParentFile().mkdirs();
     conf.writeXml(new FileOutputStream(jobFile));

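For context on the MRApp changes above: the new constructor overloads thread an
"unregistered" flag through to successfullyUnregistered, and the new submit
overload makes speculative execution configurable per test instead of always
forced off. A minimal usage sketch, with illustrative test name and values
(speculation is left disabled, as the TODO above recommends):

    // Simulate an AM whose unregistration has not succeeded, then submit
    // with map and reduce speculation explicitly disabled.
    MRApp app = new MRApp(2, 1, true, "usageSketch", true,
        /* unregistered = */ false);
    Job job = app.submit(new Configuration(), false, false);
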
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java Thu Oct 17 05:32:42 2013
@@ -135,4 +135,11 @@ public class MockAppContext implements A
   public boolean isLastAMRetry() {
     return false;
   }
+
+  @Override
+  public boolean hasSuccessfullyUnregistered() {
+    // bogus - Not Required
+    return true;
+  }
+
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java Thu Oct 17 05:32:42 2013
@@ -18,19 +18,48 @@
 
 package org.apache.hadoop.mapreduce.v2.app;
 
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintStream;
 import java.net.Proxy;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.mapred.JobContext;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
+import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.junit.Assert;
 import org.junit.Test;
-import org.mockito.Mockito;
 
 /**
  * Tests job end notification
  *
  */
+@SuppressWarnings("unchecked")
 public class TestJobEndNotifier extends JobEndNotifier {
 
   //Test maximum retries is capped by MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS
@@ -133,7 +162,7 @@ public class TestJobEndNotifier extends 
   public void testNotifyRetries() throws InterruptedException {
     Configuration conf = new Configuration();
     conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL, "http://nonexistent");
-    JobReport jobReport = Mockito.mock(JobReport.class);
+    JobReport jobReport = mock(JobReport.class);
  
     long startTime = System.currentTimeMillis();
     this.notificationCount = 0;
@@ -162,4 +191,201 @@ public class TestJobEndNotifier extends 
 
   }
 
+  @Test
+  public void testNotificationOnLastRetryNormalShutdown() throws Exception {
+    HttpServer server = startHttpServer();
+    // Act like it is the second attempt. Default max attempts is 2
+    MRApp app = spy(new MRAppWithCustomContainerAllocator(
+        2, 2, true, this.getClass().getName(), true, 2, true));
+    doNothing().when(app).sysexit();
+    Configuration conf = new Configuration();
+    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
+        JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
+    JobImpl job = (JobImpl)app.submit(conf);
+    app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
+    // Unregistration succeeds: successfullyUnregistered is set
+    app.shutDownJob();
+    Assert.assertEquals(true, app.isLastAMRetry());
+    Assert.assertEquals(1, JobEndServlet.calledTimes);
+    Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",
+        JobEndServlet.requestUri.getQuery());
+    Assert.assertEquals(JobState.SUCCEEDED.toString(),
+      JobEndServlet.foundJobState);
+    server.stop();
+  }
+
+  @Test
+  public void testAbsentNotificationOnNotLastRetryUnregistrationFailure()
+      throws Exception {
+    HttpServer server = startHttpServer();
+    MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
+        this.getClass().getName(), true, 1, false));
+    doNothing().when(app).sysexit();
+    Configuration conf = new Configuration();
+    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
+        JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
+    JobImpl job = (JobImpl)app.submit(conf);
+    app.waitForState(job, JobState.RUNNING);
+    app.getContext().getEventHandler()
+      .handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
+    app.waitForInternalState(job, JobStateInternal.REBOOT);
+    // Now shutdown.
+    // Unregistration fails: isLastAMRetry is recalculated; this is not the last retry
+    app.shutDownJob();
+    // Not the last AM attempt, so the user should see the job still running.
+    app.waitForState(job, JobState.RUNNING);
+    Assert.assertEquals(false, app.isLastAMRetry());
+    Assert.assertEquals(0, JobEndServlet.calledTimes);
+    Assert.assertEquals(null, JobEndServlet.requestUri);
+    Assert.assertEquals(null, JobEndServlet.foundJobState);
+    server.stop();
+  }
+
+  @Test
+  public void testNotificationOnLastRetryUnregistrationFailure()
+      throws Exception {
+    HttpServer server = startHttpServer();
+    MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
+        this.getClass().getName(), true, 2, false));
+    doNothing().when(app).sysexit();
+    Configuration conf = new Configuration();
+    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
+        JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
+    JobImpl job = (JobImpl)app.submit(conf);
+    app.waitForState(job, JobState.RUNNING);
+    app.getContext().getEventHandler()
+      .handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
+    app.waitForInternalState(job, JobStateInternal.REBOOT);
+    // Now shutdown. User should see FAILED state.
+    // Unregistration fails: isLastAMRetry is recalculated; this is the last retry
+    app.shutDownJob();
+    Assert.assertEquals(true, app.isLastAMRetry());
+    Assert.assertEquals(1, JobEndServlet.calledTimes);
+    Assert.assertEquals("jobid=" + job.getID() + "&status=FAILED",
+        JobEndServlet.requestUri.getQuery());
+    Assert.assertEquals(JobState.FAILED.toString(),
+      JobEndServlet.foundJobState);
+    server.stop();
+  }
+
+  private static HttpServer startHttpServer() throws Exception {
+    new File(System.getProperty(
+        "build.webapps", "build/webapps") + "/test").mkdirs();
+    HttpServer server = new HttpServer.Builder().setName("test")
+        .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build();
+    server.addServlet("jobend", "/jobend", JobEndServlet.class);
+    server.start();
+
+    JobEndServlet.calledTimes = 0;
+    JobEndServlet.requestUri = null;
+    JobEndServlet.baseUrl = "http://localhost:" + server.getPort() + "/";
+    JobEndServlet.foundJobState = null;
+    return server;
+  }
+
+  @SuppressWarnings("serial")
+  public static class JobEndServlet extends HttpServlet {
+    public static volatile int calledTimes = 0;
+    public static URI requestUri;
+    public static String baseUrl;
+    public static String foundJobState;
+
+    @Override
+    public void doGet(HttpServletRequest request, HttpServletResponse response)
+        throws ServletException, IOException {
+      InputStreamReader in = new InputStreamReader(request.getInputStream());
+      PrintStream out = new PrintStream(response.getOutputStream());
+
+      calledTimes++;
+      try {
+        requestUri = new URI(null, null,
+            request.getRequestURI(), request.getQueryString(), null);
+        foundJobState = request.getParameter("status");
+      } catch (URISyntaxException e) {
+      }
+
+      in.close();
+      out.close();
+    }
+  }
+
+  private class MRAppWithCustomContainerAllocator extends MRApp {
+
+    private boolean crushUnregistration;
+
+    public MRAppWithCustomContainerAllocator(int maps, int reduces,
+        boolean autoComplete, String testName, boolean cleanOnStart,
+        int startCount, boolean crushUnregistration) {
+      super(maps, reduces, autoComplete, testName, cleanOnStart, startCount,
+          false);
+      this.crushUnregistration = crushUnregistration;
+    }
+
+    @Override
+    protected ContainerAllocator createContainerAllocator(
+        ClientService clientService, AppContext context) {
+      context = spy(context);
+      when(context.getEventHandler()).thenReturn(null);
+      when(context.getApplicationID()).thenReturn(null);
+      return new CustomContainerAllocator(this, context);
+    }
+
+    private class CustomContainerAllocator
+        extends RMCommunicator
+        implements ContainerAllocator, RMHeartbeatHandler {
+      private MRAppWithCustomContainerAllocator app;
+      private MRAppContainerAllocator allocator =
+          new MRAppContainerAllocator();
+
+      public CustomContainerAllocator(
+          MRAppWithCustomContainerAllocator app, AppContext context) {
+        super(null, context);
+        this.app = app;
+      }
+
+      @Override
+      public void serviceInit(Configuration conf) {
+      }
+
+      @Override
+      public void serviceStart() {
+      }
+
+      @Override
+      public void serviceStop() {
+        unregister();
+      }
+
+      @Override
+      protected void doUnregistration()
+          throws YarnException, IOException, InterruptedException {
+        if (crushUnregistration) {
+          app.successfullyUnregistered.set(true);
+        } else {
+          throw new YarnException("test exception");
+        }
+      }
+
+      @Override
+      public void handle(ContainerAllocatorEvent event) {
+        allocator.handle(event);
+      }
+
+      @Override
+      public long getLastHeartbeatTime() {
+        return allocator.getLastHeartbeatTime();
+      }
+
+      @Override
+      public void runOnNextHeartbeat(Runnable callback) {
+        allocator.runOnNextHeartbeat(callback);
+      }
+
+      @Override
+      protected void heartbeat() throws Exception {
+      }
+    }
+
+  }
+
 }

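The three tests above rely on JobEndNotifier expanding the $jobId and
$jobStatus placeholders in the configured notification URL before issuing the
HTTP GET that JobEndServlet records. A hypothetical sketch of that expansion
(the real JobEndNotifier code may differ in details; the method name here is
an assumption):

    // Illustrative placeholder expansion the tests depend on.
    static String expandNotificationUrl(String template, String jobId,
        String jobStatus) {
      return template.replace("$jobId", jobId)
                     .replace("$jobStatus", jobStatus);
    }
    // ".../jobend?jobid=$jobId&status=$jobStatus" becomes, e.g.,
    // ".../jobend?jobid=job_1234567890000_0001&status=SUCCEEDED"
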
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java Thu Oct 17 05:32:42 2013
@@ -29,7 +29,6 @@ import java.util.Iterator;
 import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
@@ -54,15 +53,12 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
 import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
 import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.util.Clock;
 import org.junit.Test;
 
 /**
@@ -374,8 +370,22 @@ public class TestMRApp {
     app.waitForState(job, JobState.ERROR);
   }
 
+  @SuppressWarnings("resource")
   @Test
-  public void testJobRebootNotLastRetry() throws Exception {
+  public void testJobSuccess() throws Exception {
+    MRApp app = new MRApp(2, 2, true, this.getClass().getName(), true, false);
+    JobImpl job = (JobImpl) app.submit(new Configuration());
+    app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
+    // AM is not unregistered
+    Assert.assertEquals(JobState.RUNNING, job.getState());
+    // imitate that AM is unregistered
+    app.successfullyUnregistered.set(true);
+    app.waitForState(job, JobState.SUCCEEDED);
+  }
+
+  @Test
+  public void testJobRebootNotLastRetryOnUnregistrationFailure()
+      throws Exception {
     MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
     Job job = app.submit(new Configuration());
     app.waitForState(job, JobState.RUNNING);
@@ -394,10 +404,12 @@ public class TestMRApp {
   }
 
   @Test
-  public void testJobRebootOnLastRetry() throws Exception {
+  public void testJobRebootOnLastRetryOnUnregistrationFailure()
+      throws Exception {
     // make startCount as 2 since this is last retry which equals to
     // DEFAULT_MAX_AM_RETRY
-    MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true, 2);
+    // The last param mocks the unregistration failure
+    MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true, 2, false);
 
     Configuration conf = new Configuration();
     Job job = app.submit(conf);
@@ -411,8 +423,10 @@ public class TestMRApp {
     app.getContext().getEventHandler().handle(new JobEvent(job.getID(),
       JobEventType.JOB_AM_REBOOT));
 
-    // return exteranl state as ERROR if this is the last retry
-    app.waitForState(job, JobState.ERROR);
+    app.waitForInternalState((JobImpl) job, JobStateInternal.REBOOT);
+    // return external state as RUNNING if this is the last retry while
+    // unregistration fails
+    app.waitForState(job, JobState.RUNNING);
   }
 
   private final class MRAppWithSpiedJob extends MRApp {

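testJobSuccess passes because JobImpl reports a non-final external state until
the AM has successfully unregistered from the RM. A simplified, hypothetical
sketch of that gating (isFinal and lastNonFinalState are illustrative names,
not the real fields; hasSuccessfullyUnregistered is the AppContext method
added in this commit):

    // Hold the job in its last non-final external state until the AM has
    // unregistered, so clients never see a final state that another AM
    // attempt could still override.
    public JobState getState() {
      JobState state = getExternalState(getInternalState());
      if (!appContext.hasSuccessfullyUnregistered() && isFinal(state)) {
        return lastNonFinalState;  // e.g. JobState.RUNNING
      }
      return state;
    }
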
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java Thu Oct 17 05:32:42 2013
@@ -432,12 +432,22 @@ public class TestRMContainerAllocator {
   }
 
   private static class MyResourceManager extends MockRM {
+
+    private static long fakeClusterTimeStamp = System.currentTimeMillis();
     
     public MyResourceManager(Configuration conf) {
       super(conf);
     }
 
     @Override
+    public void serviceStart() throws Exception {
+      super.serviceStart();
+      // Ensure that the application attempt IDs for all the tests are the same
+      // The application attempt IDs will be used as the login user names
+      MyResourceManager.setClusterTimeStamp(fakeClusterTimeStamp);
+    }
+
+    @Override
     protected Dispatcher createDispatcher() {
       return new DrainDispatcher();
     }
@@ -870,8 +880,10 @@ public class TestRMContainerAllocator {
     dispatcher.await();
 
     assigned = allocator.schedule();
+    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
     dispatcher.await();
-    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());    
+    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+    assertBlacklistAdditionsAndRemovals(2, 0, rm);
 
     // mark h1/h2 as bad nodes
     nodeManager1.nodeHeartbeat(false);
@@ -880,12 +892,14 @@ public class TestRMContainerAllocator {
 
     assigned = allocator.schedule();
     dispatcher.await();
+    assertBlacklistAdditionsAndRemovals(0, 0, rm);
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());    
 
     nodeManager3.nodeHeartbeat(true); // Node heartbeat
     dispatcher.await();
-    assigned = allocator.schedule();    
+    assigned = allocator.schedule();
     dispatcher.await();
+    assertBlacklistAdditionsAndRemovals(0, 0, rm);
         
     Assert.assertTrue("No of assignments must be 3", assigned.size() == 3);
     
@@ -938,7 +952,7 @@ public class TestRMContainerAllocator {
     // Known=1, blacklisted=0, ignore should be false - assign first container
     assigned =
         getContainerOnHost(jobId, 1, 1024, new String[] { "h1" },
-            nodeManagers[0], dispatcher, allocator);
+            nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
     LOG.info("Failing container _1 on H1 (Node should be blacklisted and"
@@ -948,44 +962,52 @@ public class TestRMContainerAllocator {
     allocator.sendFailure(f1);
 
     // Test single node.
+    // Known=1, blacklisted=1, ignore should be true - assign 0
+    // Because makeRemoteRequest will not be aware of it until next call
+    // The current call will send blacklisted node "h1" to RM
+    assigned =
+        getContainerOnHost(jobId, 2, 1024, new String[] { "h1" },
+            nodeManagers[0], dispatcher, allocator, 1, 0, 0, 1, rm);
+    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
     // Known=1, blacklisted=1, ignore should be true - assign 1
     assigned =
         getContainerOnHost(jobId, 2, 1024, new String[] { "h1" },
-            nodeManagers[0], dispatcher, allocator);
+            nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
     nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
     // Known=2, blacklisted=1, ignore should be true - assign 1 anyway.
     assigned =
         getContainerOnHost(jobId, 3, 1024, new String[] { "h2" },
-            nodeManagers[1], dispatcher, allocator);
+            nodeManagers[1], dispatcher, allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
     nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
     // Known=3, blacklisted=1, ignore should be true - assign 1 anyway.
     assigned =
         getContainerOnHost(jobId, 4, 1024, new String[] { "h3" },
-            nodeManagers[2], dispatcher, allocator);
+            nodeManagers[2], dispatcher, allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
     // Known=3, blacklisted=1, ignore should be true - assign 1
     assigned =
         getContainerOnHost(jobId, 5, 1024, new String[] { "h1" },
-            nodeManagers[0], dispatcher, allocator);
+            nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
     nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
     // Known=4, blacklisted=1, ignore should be false - assign 1 anyway
     assigned =
         getContainerOnHost(jobId, 6, 1024, new String[] { "h4" },
-            nodeManagers[3], dispatcher, allocator);
+            nodeManagers[3], dispatcher, allocator, 0, 0, 1, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
     // Test blacklisting re-enabled.
     // Known=4, blacklisted=1, ignore should be false - no assignment on h1
     assigned =
         getContainerOnHost(jobId, 7, 1024, new String[] { "h1" },
-            nodeManagers[0], dispatcher, allocator);
+            nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
     // RMContainerRequestor would have created a replacement request.
 
@@ -994,17 +1016,24 @@ public class TestRMContainerAllocator {
     allocator.sendFailure(f2);
 
     // Test ignore blacklisting re-enabled
+    // Known=4, blacklisted=2, ignore should be true. Should assign 0
+    // container for the same reason above.
+    assigned =
+        getContainerOnHost(jobId, 8, 1024, new String[] { "h1" },
+            nodeManagers[0], dispatcher, allocator, 1, 0, 0, 2, rm);
+    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
     // Known=4, blacklisted=2, ignore should be true. Should assign 2
     // containers.
     assigned =
         getContainerOnHost(jobId, 8, 1024, new String[] { "h1" },
-            nodeManagers[0], dispatcher, allocator);
+            nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 2", 2, assigned.size());
 
     // Known=4, blacklisted=2, ignore should be true.
     assigned =
         getContainerOnHost(jobId, 9, 1024, new String[] { "h2" },
-            nodeManagers[1], dispatcher, allocator);
+            nodeManagers[1], dispatcher, allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
 
     // Test blacklist while ignore blacklisting enabled
@@ -1015,7 +1044,7 @@ public class TestRMContainerAllocator {
     // Known=5, blacklisted=3, ignore should be true.
     assigned =
         getContainerOnHost(jobId, 10, 1024, new String[] { "h3" },
-            nodeManagers[2], dispatcher, allocator);
+            nodeManagers[2], dispatcher, allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
     
     // Assign on 5 more nodes - to re-enable blacklisting
@@ -1024,14 +1053,14 @@ public class TestRMContainerAllocator {
       assigned =
           getContainerOnHost(jobId, 11 + i, 1024,
               new String[] { String.valueOf(5 + i) }, nodeManagers[4 + i],
-              dispatcher, allocator);
+              dispatcher, allocator, 0, 0, (i == 4 ? 3 : 0), 0, rm);
       Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
     }
 
     // Test h3 (blacklisted while ignoring blacklisting) is blacklisted.
     assigned =
         getContainerOnHost(jobId, 20, 1024, new String[] { "h3" },
-            nodeManagers[2], dispatcher, allocator);
+            nodeManagers[2], dispatcher, allocator, 0, 0, 0, 0, rm);
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
   }
 
@@ -1045,7 +1074,9 @@ public class TestRMContainerAllocator {
   private
       List<TaskAttemptContainerAssignedEvent> getContainerOnHost(JobId jobId,
           int taskAttemptId, int memory, String[] hosts, MockNM mockNM,
-          DrainDispatcher dispatcher, MyContainerAllocator allocator)
+          DrainDispatcher dispatcher, MyContainerAllocator allocator,
+          int expectedAdditions1, int expectedRemovals1,
+          int expectedAdditions2, int expectedRemovals2, MyResourceManager rm)
           throws Exception {
     ContainerRequestEvent reqEvent =
         createReq(jobId, taskAttemptId, memory, hosts);
@@ -1054,6 +1085,8 @@ public class TestRMContainerAllocator {
     // Send the request to the RM
     List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
     dispatcher.await();
+    assertBlacklistAdditionsAndRemovals(
+        expectedAdditions1, expectedRemovals1, rm);
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
 
     // Heartbeat from the required nodeManager
@@ -1062,6 +1095,8 @@ public class TestRMContainerAllocator {
 
     assigned = allocator.schedule();
     dispatcher.await();
+    assertBlacklistAdditionsAndRemovals(
+        expectedAdditions2, expectedRemovals2, rm);
     return assigned;
   }
  
@@ -1127,6 +1162,7 @@ public class TestRMContainerAllocator {
     LOG.info("RM Heartbeat (To process the scheduled containers)");
     assigned = allocator.schedule();
     dispatcher.await();
+    assertBlacklistAdditionsAndRemovals(0, 0, rm);
     Assert.assertEquals("No of assignments must be 1", 1, assigned.size());    
     
     LOG.info("Failing container _1 on H1 (should blacklist the node)");
@@ -1143,6 +1179,7 @@ public class TestRMContainerAllocator {
     //Update the Scheduler with the new requests.
     assigned = allocator.schedule();
     dispatcher.await();
+    assertBlacklistAdditionsAndRemovals(1, 0, rm);
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
 
     // send another request with different resource and priority
@@ -1161,6 +1198,7 @@ public class TestRMContainerAllocator {
     LOG.info("RM Heartbeat (To process the scheduled containers)");
     assigned = allocator.schedule();
     dispatcher.await();
+    assertBlacklistAdditionsAndRemovals(0, 0, rm);
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());    
     
     //RMContainerAllocator gets assigned a p:5 on a blacklisted node.
@@ -1169,6 +1207,7 @@ public class TestRMContainerAllocator {
     LOG.info("RM Heartbeat (To process the re-scheduled containers)");
     assigned = allocator.schedule();
     dispatcher.await();
+    assertBlacklistAdditionsAndRemovals(0, 0, rm);
     Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
     
     //Heartbeat from H3 to schedule on this host.
@@ -1178,6 +1217,7 @@ public class TestRMContainerAllocator {
     
     LOG.info("RM Heartbeat (To process the re-scheduled containers for H3)");
     assigned = allocator.schedule();
+    assertBlacklistAdditionsAndRemovals(0, 0, rm);
     dispatcher.await();
      
     // For debugging
@@ -1195,7 +1235,15 @@ public class TestRMContainerAllocator {
           + " host not correct", "h3", assig.getContainer().getNodeId().getHost());
     }
   }
-  
+
+  private static void assertBlacklistAdditionsAndRemovals(
+      int expectedAdditions, int expectedRemovals, MyResourceManager rm) {
+    Assert.assertEquals(expectedAdditions,
+        rm.getMyFifoScheduler().lastBlacklistAdditions.size());
+    Assert.assertEquals(expectedRemovals,
+        rm.getMyFifoScheduler().lastBlacklistRemovals.size());
+  }
+
   private static class MyFifoScheduler extends FifoScheduler {
 
     public MyFifoScheduler(RMContext rmContext) {
@@ -1210,6 +1258,8 @@ public class TestRMContainerAllocator {
     }
     
     List<ResourceRequest> lastAsk = null;
+    List<String> lastBlacklistAdditions;
+    List<String> lastBlacklistRemovals;
     
     // override this to copy the objects otherwise FifoScheduler updates the
     // numContainers in same objects as kept by RMContainerAllocator
@@ -1226,6 +1276,8 @@ public class TestRMContainerAllocator {
         askCopy.add(reqCopy);
       }
       lastAsk = ask;
+      lastBlacklistAdditions = blacklistAdditions;
+      lastBlacklistRemovals = blacklistRemovals;
       return super.allocate(
           applicationAttemptId, askCopy, release, 
           blacklistAdditions, blacklistRemovals);

Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java Thu Oct 17 05:32:42 2013
@@ -867,5 +867,12 @@ public class TestRuntimeEstimators {
     public boolean isLastAMRetry() {
       return false;
     }
+
+    @Override
+    public boolean hasSuccessfullyUnregistered() {
+      // bogus - Not Required
+      return true;
+    }
+
   }
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java Thu Oct 17 05:32:42 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.mapreduce.v2.a
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
@@ -36,18 +37,17 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TypeConverter;
-import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
-import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext;
 import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
-import org.apache.hadoop.mapreduce.v2.app.client.MRClientService;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -57,7 +57,7 @@ import org.apache.hadoop.service.Service
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -75,7 +75,44 @@ import org.junit.Test;
    private Path stagingJobPath = new Path(stagingJobDir);
    private final static RecordFactory recordFactory = RecordFactoryProvider.
        getRecordFactory(null);
-   
+
+   @Test
+   public void testDeletionofStagingOnUnregistrationFailure()
+       throws IOException {
+     testDeletionofStagingOnUnregistrationFailure(2, false);
+     testDeletionofStagingOnUnregistrationFailure(1, true);
+   }
+
+   @SuppressWarnings("resource")
+   private void testDeletionofStagingOnUnregistrationFailure(
+       int maxAttempts, boolean shouldHaveDeleted) throws IOException {
+     conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
+     fs = mock(FileSystem.class);
+     when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
+     //Staging Dir exists
+     String user = UserGroupInformation.getCurrentUser().getShortUserName();
+     Path stagingDir = MRApps.getStagingAreaDir(conf, user);
+     when(fs.exists(stagingDir)).thenReturn(true);
+     ApplicationId appId = ApplicationId.newInstance(0, 1);
+     ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+     JobId jobid = recordFactory.newRecordInstance(JobId.class);
+     jobid.setAppId(appId);
+     TestMRApp appMaster = new TestMRApp(attemptId, null,
+         JobStateInternal.RUNNING, maxAttempts);
+     appMaster.crushUnregistration = true;
+     appMaster.init(conf);
+     appMaster.start();
+     appMaster.shutDownJob();
+     ((RunningAppContext) appMaster.getContext()).computeIsLastAMRetry();
+     if (shouldHaveDeleted) {
+       Assert.assertEquals(new Boolean(true), appMaster.isLastAMRetry());
+       verify(fs).delete(stagingJobPath, true);
+     } else {
+       Assert.assertEquals(new Boolean(false), appMaster.isLastAMRetry());
+       verify(fs, never()).delete(stagingJobPath, true);
+     }
+   }
+
    @Test
    public void testDeletionofStaging() throws IOException {
      conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
@@ -204,6 +241,7 @@ import org.junit.Test;
      ContainerAllocator allocator;
      boolean testIsLastAMRetry = false;
      JobStateInternal jobStateInternal;
+     boolean crushUnregistration = false;
 
      public TestMRApp(ApplicationAttemptId applicationAttemptId, 
          ContainerAllocator allocator, int maxAppAttempts) {
@@ -211,6 +249,7 @@ import org.junit.Test;
            applicationAttemptId, 1), "testhost", 2222, 3333,
            System.currentTimeMillis(), maxAppAttempts);
        this.allocator = allocator;
+       this.successfullyUnregistered.set(true);
      }
 
      public TestMRApp(ApplicationAttemptId applicationAttemptId,
@@ -229,7 +268,11 @@ import org.junit.Test;
      protected ContainerAllocator createContainerAllocator(
          final ClientService clientService, final AppContext context) {
        if(allocator == null) {
-         return super.createContainerAllocator(clientService, context);
+         if (crushUnregistration) {
+           return new CustomContainerAllocator(context);
+         } else {
+           return super.createContainerAllocator(clientService, context);
+         }
        }
        return allocator;
      }
@@ -280,6 +323,41 @@ import org.junit.Test;
      public boolean getTestIsLastAMRetry(){
        return testIsLastAMRetry;
      }
+
+    private class CustomContainerAllocator extends RMCommunicator
+        implements ContainerAllocator {
+
+      public CustomContainerAllocator(AppContext context) {
+        super(null, context);
+      }
+
+      @Override
+      public void serviceInit(Configuration conf) {
+      }
+
+      @Override
+      public void serviceStart() {
+      }
+
+      @Override
+      public void serviceStop() {
+        unregister();
+      }
+
+      @Override
+      protected void doUnregistration()
+          throws YarnException, IOException, InterruptedException {
+        throw new YarnException("test exception");
+      }
+
+      @Override
+      protected void heartbeat() throws Exception {
+      }
+
+      @Override
+      public void handle(ContainerAllocatorEvent event) {
+      }
+    }
    }
 
   private final class MRAppTestCleanup extends MRApp {

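The two TestStagingCleanup cases above exercise the rule that the staging
directory is deleted only on the genuinely last AM retry, which is
re-evaluated via computeIsLastAMRetry after unregistration fails. A hedged
sketch of that decision, assuming the usual attempt-id comparison:

    // Staging is removed only when no further AM attempt is possible;
    // otherwise it is kept so the next attempt can recover the job.
    boolean lastRetry = appAttemptId.getAttemptId() >= maxAppAttempts;
    if (lastRetry) {
      fs.delete(stagingJobPath, true);
    }
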
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java Thu Oct 17 05:32:42 2013
@@ -275,6 +275,7 @@ public class TestJobImpl {
 
     AppContext mockContext = mock(AppContext.class);
     when(mockContext.isLastAMRetry()).thenReturn(true);
+    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
     JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, mockContext);
     completeJobTasks(job);
     assertJobState(job, JobStateInternal.COMMITTING);
@@ -282,7 +283,9 @@ public class TestJobImpl {
     syncBarrier.await();
     job.handle(new JobEvent(job.getID(), JobEventType.JOB_AM_REBOOT));
     assertJobState(job, JobStateInternal.REBOOT);
-    // return the external state as FAILED since this is last retry.
+    // return the external state as ERROR since this is the last retry.
+    Assert.assertEquals(JobState.RUNNING, job.getState());
+    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
     Assert.assertEquals(JobState.ERROR, job.getState());
 
     dispatcher.stop();
@@ -590,12 +593,14 @@ public class TestJobImpl {
     final JobDiagnosticsUpdateEvent diagUpdateEvent =
         new JobDiagnosticsUpdateEvent(jobId, diagMsg);
     MRAppMetrics mrAppMetrics = MRAppMetrics.create();
+    AppContext mockContext = mock(AppContext.class);
+    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
     JobImpl job = new JobImpl(jobId, Records
         .newRecord(ApplicationAttemptId.class), new Configuration(),
         mock(EventHandler.class),
         null, mock(JobTokenSecretManager.class), null,
         new SystemClock(), null,
-        mrAppMetrics, null, true, null, 0, null, null, null, null);
+        mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
     job.handle(diagUpdateEvent);
     String diagnostics = job.getReport().getDiagnostics();
     Assert.assertNotNull(diagnostics);
@@ -606,7 +611,7 @@ public class TestJobImpl {
         mock(EventHandler.class),
         null, mock(JobTokenSecretManager.class), null,
         new SystemClock(), null,
-        mrAppMetrics, null, true, null, 0, null, null, null, null);
+        mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
     job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
     job.handle(diagUpdateEvent);
     diagnostics = job.getReport().getDiagnostics();
@@ -699,7 +704,9 @@ public class TestJobImpl {
     commitHandler.init(conf);
     commitHandler.start();
 
-    JobImpl job = createStubbedJob(conf, dispatcher, 2, null);
+    AppContext mockContext = mock(AppContext.class);
+    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
+    JobImpl job = createStubbedJob(conf, dispatcher, 2, mockContext);
     JobId jobId = job.getID();
     job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
     assertJobState(job, JobStateInternal.INITED);
@@ -707,12 +714,15 @@ public class TestJobImpl {
     assertJobState(job, JobStateInternal.FAILED);
 
     job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_COMPLETED));
-    Assert.assertEquals(JobState.FAILED, job.getState());
+    assertJobState(job, JobStateInternal.FAILED);
     job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
-    Assert.assertEquals(JobState.FAILED, job.getState());
+    assertJobState(job, JobStateInternal.FAILED);
     job.handle(new JobEvent(jobId, JobEventType.JOB_MAP_TASK_RESCHEDULED));
-    Assert.assertEquals(JobState.FAILED, job.getState());
+    assertJobState(job, JobStateInternal.FAILED);
     job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE));
+    assertJobState(job, JobStateInternal.FAILED);
+    Assert.assertEquals(JobState.RUNNING, job.getState());
+    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
     Assert.assertEquals(JobState.FAILED, job.getState());
 
     dispatcher.stop();
@@ -750,6 +760,10 @@ public class TestJobImpl {
       Dispatcher dispatcher, int numSplits, AppContext appContext) {
     JobID jobID = JobID.forName("job_1234567890000_0001");
     JobId jobId = TypeConverter.toYarn(jobID);
+    if (appContext == null) {
+      appContext = mock(AppContext.class);
+      when(appContext.hasSuccessfullyUnregistered()).thenReturn(true);
+    }
     StubbedJob job = new StubbedJob(jobId,
         ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0),
         conf,dispatcher.getEventHandler(), true, "somebody", numSplits, appContext);

Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java Thu Oct 17 05:32:42 2013
@@ -89,6 +89,10 @@ public class TestLocalContainerAllocator
     }
 
     @Override
+    protected void unregister() {
+    }
+
+    @Override
     protected void startAllocatorThread() {
       allocatorThread = new Thread();
     }

Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java Thu Oct 17 05:32:42 2013
@@ -469,7 +469,7 @@ public class TypeConverter {
     QueueInfo toReturn = new QueueInfo(queueInfo.getQueueName(), "Capacity: " +
       queueInfo.getCapacity() * 100 + ", MaximumCapacity: " +
       (queueInfo.getMaximumCapacity() < 0 ? "UNDEFINED" :
-        queueInfo.getMaximumCapacity()) + ", CurrentCapacity: " +
+        queueInfo.getMaximumCapacity() * 100) + ", CurrentCapacity: " +
       queueInfo.getCurrentCapacity() * 100, fromYarn(queueInfo.getQueueState()),
       TypeConverter.fromYarnApps(queueInfo.getApplications(), conf));
     List<QueueInfo> childQueues = new ArrayList<QueueInfo>();

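The one-line TypeConverter change fixes an inconsistency: capacity and current
capacity were already reported as percentages (value * 100), but maximum
capacity was left as a raw fraction. For a queue with capacity 0.5, maximum
capacity 0.8 and current capacity 0.25, the description changes as follows:

    // Before: "Capacity: 50.0, MaximumCapacity: 0.8, CurrentCapacity: 25.0"
    // After:  "Capacity: 50.0, MaximumCapacity: 80.0, CurrentCapacity: 25.0"
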
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java Thu Oct 17 05:32:42 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.v2.j
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 
 /**
  * Stores Job History configuration keys that can be set by administrators of
@@ -123,6 +124,12 @@ public class JHAdminConfig {
   public static final String MR_HISTORY_PRINCIPAL = 
     MR_HISTORY_PREFIX + "principal";
   
+  /** The HTTP policy (e.g. HTTP_ONLY) for the MR history server webapp. */
+  public static final String MR_HS_HTTP_POLICY = MR_HISTORY_PREFIX
+      + "http.policy";
+  public static String DEFAULT_MR_HS_HTTP_POLICY =
+          CommonConfigurationKeysPublic.HTTP_POLICY_HTTP_ONLY;
+  
   /**The address the history server webapp is on.*/
   public static final String MR_HISTORY_WEBAPP_ADDRESS =
     MR_HISTORY_PREFIX + "webapp.address";
@@ -130,6 +137,13 @@ public class JHAdminConfig {
   public static final String DEFAULT_MR_HISTORY_WEBAPP_ADDRESS =
     "0.0.0.0:" + DEFAULT_MR_HISTORY_WEBAPP_PORT;
   
+  /**The https address the history server webapp is on.*/
+  public static final String MR_HISTORY_WEBAPP_HTTPS_ADDRESS =
+      MR_HISTORY_PREFIX + "webapp.https.address";
+  public static final int DEFAULT_MR_HISTORY_WEBAPP_HTTPS_PORT = 19890;
+  public static final String DEFAULT_MR_HISTORY_WEBAPP_HTTPS_ADDRESS =
+      "0.0.0.0:" + DEFAULT_MR_HISTORY_WEBAPP_HTTPS_PORT;
+  
   /**The kerberos principal to be used for spnego filter for history server*/
   public static final String MR_WEBAPP_SPNEGO_USER_NAME_KEY =
       MR_HISTORY_PREFIX + "webapp.spnego-principal";
@@ -150,10 +164,31 @@ public class JHAdminConfig {
   public static final String MR_HISTORY_STORAGE =
     MR_HISTORY_PREFIX + "store.class";
 
+  /**
+   * Enable the history server to store server state and recover server state
+   * upon startup.
+   */
+  public static final String MR_HS_RECOVERY_ENABLE =
+      MR_HISTORY_PREFIX + "recovery.enable";
+  public static final boolean DEFAULT_MR_HS_RECOVERY_ENABLE = false;
+
+  /**
+   * The HistoryServerStateStoreService class to store and recover server state
+   */
+  public static final String MR_HS_STATE_STORE =
+      MR_HISTORY_PREFIX + "recovery.store.class";
+
+  /**
+   * The URI where server state will be stored when
+   * HistoryServerFileSystemStateStoreService is configured as the state store
+   */
+  public static final String MR_HS_FS_STATE_STORE_URI =
+      MR_HISTORY_PREFIX + "recovery.store.fs.uri";
+
   /** Whether to use fixed ports with the minicluster. */
   public static final String MR_HISTORY_MINICLUSTER_FIXED_PORTS = MR_HISTORY_PREFIX
        + "minicluster.fixed.ports";
-
+  
   /**
    * Default is false to be able to run tests concurrently without port
    * conflicts.

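The new JHAdminConfig keys cover an HTTPS endpoint for the history server
webapp and opt-in server state recovery. A hedged configuration sketch; the
fully qualified state-store class name and the URI are assumptions for
illustration (only the simple name HistoryServerFileSystemStateStoreService
appears in the Javadoc above):

    Configuration conf = new Configuration();
    // Opt in to history server state recovery (default is false).
    conf.setBoolean(JHAdminConfig.MR_HS_RECOVERY_ENABLE, true);
    // Assumed package path and illustrative URI.
    conf.set(JHAdminConfig.MR_HS_STATE_STORE,
        "org.apache.hadoop.mapreduce.v2.hs.HistoryServerFileSystemStateStoreService");
    conf.set(JHAdminConfig.MR_HS_FS_STATE_STORE_URI,
        "hdfs://namenode:8020/mr-history/recovery");
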
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java Thu Oct 17 05:32:42 2013
@@ -20,11 +20,7 @@ package org.apache.hadoop.mapreduce.v2.j
 
 import java.io.File;
 import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
 import java.util.Calendar;
-import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -45,13 +41,8 @@ import org.apache.hadoop.mapreduce.MRJob
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Splitter;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -126,9 +117,6 @@ public class JobHistoryUtils {
   public static final Pattern TIMESTAMP_DIR_PATTERN = Pattern.compile(TIMESTAMP_DIR_REGEX);
   private static final String TIMESTAMP_DIR_FORMAT = "%04d" + File.separator + "%02d" + File.separator + "%02d";
 
-  private static final Splitter ADDR_SPLITTER = Splitter.on(':').trimResults();
-  private static final Joiner JOINER = Joiner.on("");
-
   private static final PathFilter CONF_FILTER = new PathFilter() {
     @Override
     public boolean accept(Path path) {
@@ -497,36 +485,6 @@ public class JobHistoryUtils {
     return result;
   }
 
-  public static String getHistoryUrl(Configuration conf, ApplicationId appId) 
-       throws UnknownHostException {
-  //construct the history url for job
-    String addr = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
-        JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
-    Iterator<String> it = ADDR_SPLITTER.split(addr).iterator();
-    it.next(); // ignore the bind host
-    String port = it.next();
-    // Use hs address to figure out the host for webapp
-    addr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS,
-        JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
-    String host = ADDR_SPLITTER.split(addr).iterator().next();
-    String hsAddress = JOINER.join(host, ":", port);
-    InetSocketAddress address = NetUtils.createSocketAddr(
-      hsAddress, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT,
-      JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
-    StringBuffer sb = new StringBuffer();
-    if (address.getAddress().isAnyLocalAddress() || 
-        address.getAddress().isLoopbackAddress()) {
-      sb.append(InetAddress.getLocalHost().getCanonicalHostName());
-    } else {
-      sb.append(address.getHostName());
-    }
-    sb.append(":").append(address.getPort());
-    sb.append("/jobhistory/job/");
-    JobID jobId = TypeConverter.fromYarn(appId);
-    sb.append(jobId.toString());
-    return sb.toString();
-  }
-
   public static Path getPreviousJobHistoryPath(
       Configuration conf, ApplicationAttemptId applicationAttemptId)
       throws IOException {

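getHistoryUrl() is removed here along with the Guava splitter/joiner address plumbing it depended on. With the HTTPS keys added to JHAdminConfig, a policy-aware equivalent would pick the webapp address by scheme; the helper below is only a hypothetical sketch of that idea, not the replacement this commit wires in elsewhere:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
    import org.apache.hadoop.net.NetUtils;

    public class HistoryUrlSketch {
      // Hypothetical helper: advertise the history webapp base URL per the policy.
      static String getWebappBase(Configuration conf) {
        boolean https = !CommonConfigurationKeysPublic.HTTP_POLICY_HTTP_ONLY.equals(
            conf.get(JHAdminConfig.MR_HS_HTTP_POLICY,
                JHAdminConfig.DEFAULT_MR_HS_HTTP_POLICY));
        String addr = https
            ? conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS,
                JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_ADDRESS)
            : conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
                JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
        InetSocketAddress isa = NetUtils.createSocketAddr(addr);
        return (https ? "https://" : "http://")
            + isa.getHostName() + ":" + isa.getPort();
      }
    }
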
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java Thu Oct 17 05:32:42 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.mapreduce.v2.u
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.security.AccessController;
 import java.security.PrivilegedActionException;
 import java.security.PrivilegedExceptionAction;
@@ -48,6 +49,7 @@ import org.apache.hadoop.mapreduce.v2.ap
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.ContainerLogAppender;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
@@ -133,6 +135,30 @@ public class MRApps extends Apps {
     return TaskAttemptStateUI.valueOf(attemptStateStr);
   }
 
+  // gets the base name of the MapReduce framework or null if no
+  // framework was configured
+  private static String getMRFrameworkName(Configuration conf) {
+    String frameworkName = null;
+    String framework =
+        conf.get(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, "");
+    if (!framework.isEmpty()) {
+      URI uri;
+      try {
+        uri = new URI(framework);
+      } catch (URISyntaxException e) {
+        throw new IllegalArgumentException("Unable to parse '" + framework
+            + "' as a URI, check the setting for "
+            + MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, e);
+      }
+
+      frameworkName = uri.getFragment();
+      if (frameworkName == null) {
+        frameworkName = new Path(uri).getName();
+      }
+    }
+    return frameworkName;
+  }
+
   private static void setMRFrameworkClasspath(
       Map<String, String> environment, Configuration conf) throws IOException {
     // Propagate the system classpath when using the mini cluster
@@ -141,18 +167,34 @@ public class MRApps extends Apps {
           System.getProperty("java.class.path"));
     }
 
-    // Add standard Hadoop classes
-    for (String c : conf.getStrings(
-        YarnConfiguration.YARN_APPLICATION_CLASSPATH,
-        YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
-      Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), c
-          .trim());
+    // if the framework is specified then only use the MR classpath
+    String frameworkName = getMRFrameworkName(conf);
+    if (frameworkName == null) {
+      // Add standard Hadoop classes
+      for (String c : conf.getStrings(
+          YarnConfiguration.YARN_APPLICATION_CLASSPATH,
+          YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
+        Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), c
+            .trim());
+      }
     }
+
+    boolean foundFrameworkInClasspath = (frameworkName == null);
     for (String c : conf.getStrings(
         MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,
-        MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH)) {
+        StringUtils.getStrings(
+            MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH))){
       Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), c
           .trim());
+      if (!foundFrameworkInClasspath) {
+        foundFrameworkInClasspath = c.contains(frameworkName);
+      }
+    }
+
+    if (!foundFrameworkInClasspath) {
+      throw new IllegalArgumentException(
+          "Could not locate MapReduce framework name '" + frameworkName
+          + "' in " + MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH);
     }
     // TODO: Remove duplicates.
   }

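The contract getMRFrameworkName() and setMRFrameworkClasspath() enforce: the framework name is the URI fragment of mapreduce.application.framework.path (or the path's base name when no fragment is given), and mapreduce.application.classpath must mention that name or classpath construction throws. A sketch of a conforming configuration, with an illustrative archive path and fragment name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class FrameworkPathSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // The fragment "mrframework" names the localized directory.
        conf.set(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH,
            "hdfs:///mapred/framework/hadoop-mapreduce.tar.gz#mrframework");
        // The classpath must reference the fragment, or an
        // IllegalArgumentException is raised when the classpath is built.
        conf.set(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,
            "mrframework/hadoop-mapreduce.tar.gz/*");
      }
    }
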
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto Thu Oct 17 05:32:42 2013
@@ -19,6 +19,7 @@
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "MRClientProtocol";
 option java_generic_services = true;
+package hadoop.mapreduce;
 
 import "Security.proto";
 import "mr_service_protos.proto";

Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto Thu Oct 17 05:32:42 2013
@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "MRProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.mapreduce;
 
 import "yarn_protos.proto";
 
@@ -29,7 +30,7 @@ enum TaskTypeProto {
 }
 
 message JobIdProto {
-  optional ApplicationIdProto app_id = 1;
+  optional hadoop.yarn.ApplicationIdProto app_id = 1;
   optional int32 id = 2;
 }
 
@@ -115,7 +116,7 @@ message TaskAttemptReportProto {
   optional string node_manager_host = 12;
   optional int32 node_manager_port = 13;
   optional int32 node_manager_http_port = 14;
-  optional ContainerIdProto container_id = 15;
+  optional hadoop.yarn.ContainerIdProto container_id = 15;
 }
 
 enum JobStateProto {
@@ -148,9 +149,9 @@ message JobReportProto {
 }
 
 message AMInfoProto {
-  optional ApplicationAttemptIdProto application_attempt_id = 1;
+  optional hadoop.yarn.ApplicationAttemptIdProto application_attempt_id = 1;
   optional int64 start_time = 2;
-  optional ContainerIdProto container_id = 3;
+  optional hadoop.yarn.ContainerIdProto container_id = 3;
   optional string node_manager_host = 4;
   optional int32 node_manager_port = 5;
   optional int32 node_manager_http_port = 6;

Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto Thu Oct 17 05:32:42 2013
@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "MRServiceProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.mapreduce;
 
 import "Security.proto";
 import "mr_protos.proto";

Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java Thu Oct 17 05:32:42 2013
@@ -191,7 +191,9 @@ public class TestMRApps {
     }
     assertTrue(environment.get("CLASSPATH").contains(yarnAppClasspath));
     String mrAppClasspath = 
-        job.getConfiguration().get(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH);
+        job.getConfiguration().get(
+            MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,
+            MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH);
     if (mrAppClasspath != null) {
       mrAppClasspath = mrAppClasspath.replaceAll(",\\s*", File.pathSeparator)
         .trim();
@@ -282,7 +284,46 @@ public class TestMRApps {
     assertEquals("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not in the app"
       + " classpath!", expectedAppClasspath, appCp);
   }
-  
+
+  @Test (timeout = 30000)
+  public void testSetClasspathWithFramework() throws IOException {
+    final String FRAMEWORK_NAME = "some-framework-name";
+    final String FRAMEWORK_PATH = "some-framework-path#" + FRAMEWORK_NAME;
+    Configuration conf = new Configuration();
+    conf.set(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, FRAMEWORK_PATH);
+    Map<String, String> env = new HashMap<String, String>();
+    try {
+      MRApps.setClasspath(env, conf);
+      fail("Failed to catch framework path set without classpath change");
+    } catch (IllegalArgumentException e) {
+      assertTrue("Unexpected IllegalArgumentException",
+          e.getMessage().contains("Could not locate MapReduce framework name '"
+              + FRAMEWORK_NAME + "'"));
+    }
+
+    env.clear();
+    final String FRAMEWORK_CLASSPATH = FRAMEWORK_NAME + "/*.jar";
+    conf.set(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH, FRAMEWORK_CLASSPATH);
+    MRApps.setClasspath(env, conf);
+    final String stdClasspath = StringUtils.join(File.pathSeparator,
+        Arrays.asList("job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
+            ApplicationConstants.Environment.PWD.$() + "/*"));
+    String expectedClasspath = StringUtils.join(File.pathSeparator,
+        Arrays.asList(ApplicationConstants.Environment.PWD.$(),
+            FRAMEWORK_CLASSPATH, stdClasspath));
+    assertEquals("Incorrect classpath with framework and no user precedence",
+        expectedClasspath, env.get("CLASSPATH"));
+
+    env.clear();
+    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, true);
+    MRApps.setClasspath(env, conf);
+    expectedClasspath = StringUtils.join(File.pathSeparator,
+        Arrays.asList(ApplicationConstants.Environment.PWD.$(),
+            stdClasspath, FRAMEWORK_CLASSPATH));
+    assertEquals("Incorrect classpath with framework and user precedence",
+        expectedClasspath, env.get("CLASSPATH"));
+  }
+
   @Test (timeout = 30000)
   public void testSetupDistributedCacheEmpty() throws IOException {
     Configuration conf = new Configuration();

Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java Thu Oct 17 05:32:42 2013
@@ -62,8 +62,8 @@ import com.google.common.collect.Iterato
 public class Counters
     extends AbstractCounters<Counters.Counter, Counters.Group> {
   
-  public static int MAX_COUNTER_LIMIT = Limits.COUNTERS_MAX;
-  public static int MAX_GROUP_LIMIT = Limits.GROUPS_MAX;
+  public static int MAX_COUNTER_LIMIT = Limits.getCountersMax();
+  public static int MAX_GROUP_LIMIT = Limits.getGroupsMax();
   private static HashMap<String, String> depricatedCounterMap =
       new HashMap<String, String>();
   

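Reading the limits through accessors instead of public static fields lets them be derived from configuration after class load rather than frozen at class-init time. The Limits internals are not part of this diff; the sketch below only illustrates the accessor pattern, with an assumed key name and default:

    import org.apache.hadoop.conf.Configuration;

    // Not the real Limits class -- a minimal sketch of the getter-backed pattern.
    class LimitsSketch {
      private static int countersMax = 120;  // illustrative default

      static synchronized void init(Configuration conf) {
        // Key name assumed for illustration.
        countersMax = conf.getInt("mapreduce.job.counters.max", countersMax);
      }

      static synchronized int getCountersMax() {
        return countersMax;
      }
    }
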
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java Thu Oct 17 05:32:42 2013
@@ -173,7 +173,12 @@ public class JobClient extends CLI {
      * job completes.)
      */
     public NetworkedJob(JobStatus status, Cluster cluster) throws IOException {
-      job = Job.getInstance(cluster, status, new JobConf(status.getJobFile()));
+      this(status, cluster, new JobConf(status.getJobFile()));
+    }
+    
+    private NetworkedJob(JobStatus status, Cluster cluster, JobConf conf)
+        throws IOException {
+      this(Job.getInstance(cluster, status, conf));
     }
 
     public NetworkedJob(Job job) throws IOException {
@@ -592,7 +597,8 @@ public class JobClient extends CLI {
       if (job != null) {
         JobStatus status = JobStatus.downgrade(job.getStatus());
         if (status != null) {
-          return new NetworkedJob(status, cluster);
+          return new NetworkedJob(status, cluster,
+              new JobConf(job.getConfiguration()));
         } 
       }
     } catch (InterruptedException ie) {

Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java Thu Oct 17 05:32:42 2013
@@ -972,12 +972,10 @@ public class MapTask extends Task {
       maxRec = kvmeta.capacity() / NMETA;
       softLimit = (int)(kvbuffer.length * spillper);
       bufferRemaining = softLimit;
-      if (LOG.isInfoEnabled()) {
-        LOG.info(JobContext.IO_SORT_MB + ": " + sortmb);
-        LOG.info("soft limit at " + softLimit);
-        LOG.info("bufstart = " + bufstart + "; bufvoid = " + bufvoid);
-        LOG.info("kvstart = " + kvstart + "; length = " + maxRec);
-      }
+      LOG.info(JobContext.IO_SORT_MB + ": " + sortmb);
+      LOG.info("soft limit at " + softLimit);
+      LOG.info("bufstart = " + bufstart + "; bufvoid = " + bufvoid);
+      LOG.info("kvstart = " + kvstart + "; length = " + maxRec);
 
       // k/v serialization
       comparator = job.getOutputKeyComparator();
@@ -1180,10 +1178,8 @@ public class MapTask extends Task {
       final int aligned = pos - (pos % METASIZE);
       kvindex =
         ((aligned - METASIZE + kvbuffer.length) % kvbuffer.length) / 4;
-      if (LOG.isInfoEnabled()) {
-        LOG.info("(EQUATOR) " + pos + " kvi " + kvindex +
-            "(" + (kvindex * 4) + ")");
-      }
+      LOG.info("(EQUATOR) " + pos + " kvi " + kvindex +
+          "(" + (kvindex * 4) + ")");
     }
 
     /**
@@ -1198,10 +1194,8 @@ public class MapTask extends Task {
       // set start/end to point to first meta record
       kvstart = kvend =
         ((aligned - METASIZE + kvbuffer.length) % kvbuffer.length) / 4;
-      if (LOG.isInfoEnabled()) {
-        LOG.info("(RESET) equator " + e + " kv " + kvstart + "(" +
-          (kvstart * 4) + ")" + " kvi " + kvindex + "(" + (kvindex * 4) + ")");
-      }
+      LOG.info("(RESET) equator " + e + " kv " + kvstart + "(" +
+        (kvstart * 4) + ")" + " kvi " + kvindex + "(" + (kvindex * 4) + ")");
     }
 
     /**
@@ -1456,15 +1450,13 @@ public class MapTask extends Task {
         if (kvindex != kvend) {
           kvend = (kvindex + NMETA) % kvmeta.capacity();
           bufend = bufmark;
-          if (LOG.isInfoEnabled()) {
-            LOG.info("Spilling map output");
-            LOG.info("bufstart = " + bufstart + "; bufend = " + bufmark +
-                     "; bufvoid = " + bufvoid);
-            LOG.info("kvstart = " + kvstart + "(" + (kvstart * 4) +
-                     "); kvend = " + kvend + "(" + (kvend * 4) +
-                     "); length = " + (distanceTo(kvend, kvstart,
-                           kvmeta.capacity()) + 1) + "/" + maxRec);
-          }
+          LOG.info("Spilling map output");
+          LOG.info("bufstart = " + bufstart + "; bufend = " + bufmark +
+                   "; bufvoid = " + bufvoid);
+          LOG.info("kvstart = " + kvstart + "(" + (kvstart * 4) +
+                   "); kvend = " + kvend + "(" + (kvend * 4) +
+                   "); length = " + (distanceTo(kvend, kvstart,
+                         kvmeta.capacity()) + 1) + "/" + maxRec);
           sortAndSpill();
         }
       } catch (InterruptedException e) {
@@ -1547,15 +1539,13 @@ public class MapTask extends Task {
       kvend = (kvindex + NMETA) % kvmeta.capacity();
       bufend = bufmark;
       spillInProgress = true;
-      if (LOG.isInfoEnabled()) {
-        LOG.info("Spilling map output");
-        LOG.info("bufstart = " + bufstart + "; bufend = " + bufmark +
-                 "; bufvoid = " + bufvoid);
-        LOG.info("kvstart = " + kvstart + "(" + (kvstart * 4) +
-                 "); kvend = " + kvend + "(" + (kvend * 4) +
-                 "); length = " + (distanceTo(kvend, kvstart,
-                       kvmeta.capacity()) + 1) + "/" + maxRec);
-      }
+      LOG.info("Spilling map output");
+      LOG.info("bufstart = " + bufstart + "; bufend = " + bufmark +
+               "; bufvoid = " + bufvoid);
+      LOG.info("kvstart = " + kvstart + "(" + (kvstart * 4) +
+               "); kvend = " + kvend + "(" + (kvend * 4) +
+               "); length = " + (distanceTo(kvend, kvstart,
+                     kvmeta.capacity()) + 1) + "/" + maxRec);
       spillReady.signal();
     }
 

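Dropping the isInfoEnabled() guards is reasonable here: INFO is on by default and these messages are cheap string concatenations. The guard still pays off when building the message is expensive; a small sketch of the distinction:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class LogGuardSketch {
      private static final Log LOG = LogFactory.getLog(LogGuardSketch.class);

      void report(int[] kvmeta) {
        // Cheap message: no guard needed, mirroring the change above.
        LOG.info("records = " + kvmeta.length);
        // Expensive message: keep the guard so the formatting work is
        // skipped entirely when the level is disabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("meta dump = " + java.util.Arrays.toString(kvmeta));
        }
      }
    }
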
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java Thu Oct 17 05:32:42 2013
@@ -47,6 +47,13 @@ public class SequenceFileAsBinaryOutputF
    */
   static protected class WritableValueBytes extends org.apache.hadoop.mapreduce
       .lib.output.SequenceFileAsBinaryOutputFormat.WritableValueBytes {
+    public WritableValueBytes() {
+      super();
+    }
+
+    public WritableValueBytes(BytesWritable value) {
+      super(value);
+    }
   }
 
   /**

Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java Thu Oct 17 05:32:42 2013
@@ -24,13 +24,19 @@ import java.util.List;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionCodecFactory;
+import org.apache.hadoop.io.compress.SplittableCompressionCodec;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 
 /**
@@ -127,4 +133,34 @@ public abstract class CombineFileInputFo
     return result.toArray(new FileStatus[result.size()]);
   }
 
+  /**
+   * Subclasses should avoid overriding this method and should instead only
+   * override {@link #isSplitable(FileSystem, Path)}.  The implementation of
+   * this method simply calls the other method to preserve compatibility.
+   * @see <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5530">
+   * MAPREDUCE-5530</a>
+   *
+   * @param context the job context
+   * @param file the file name to check
+   * @return is this file splitable?
+   */
+  @InterfaceAudience.Private
+  @Override
+  protected boolean isSplitable(JobContext context, Path file) {
+    try {
+      return isSplitable(FileSystem.get(context.getConfiguration()), file);
+    }
+    catch (IOException ioe) {
+      throw new RuntimeException(ioe);
+    }
+  }
+
+  protected boolean isSplitable(FileSystem fs, Path file) {
+    final CompressionCodec codec =
+      new CompressionCodecFactory(fs.getConf()).getCodec(file);
+    if (null == codec) {
+      return true;
+    }
+    return codec instanceof SplittableCompressionCodec;
+  }
 }

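A minimal old-API subclass following the javadoc above, overriding only the FileSystem/Path variant and never the JobContext bridge; the record reader and type parameters are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.InputSplit;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.RecordReader;
    import org.apache.hadoop.mapred.Reporter;
    import org.apache.hadoop.mapred.lib.CombineFileInputFormat;

    // Illustrative subclass: treats every file as non-splittable.
    public class WholeFileCombineFormat
        extends CombineFileInputFormat<LongWritable, Text> {

      @Override
      protected boolean isSplitable(FileSystem fs, Path file) {
        return false;  // always hand out whole files
      }

      @Override
      public RecordReader<LongWritable, Text> getRecordReader(
          InputSplit split, JobConf conf, Reporter reporter) throws IOException {
        throw new UnsupportedOperationException("reader omitted in this sketch");
      }
    }
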
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java Thu Oct 17 05:32:42 2013
@@ -21,7 +21,7 @@ package org.apache.hadoop.mapred.lib;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Partitioner;
 
@@ -41,4 +41,30 @@ public class TotalOrderPartitioner<K ,V>
     super.setConf(job);
   }
 
+  /**
+   * Set the path to the SequenceFile storing the sorted partition keyset.
+   * It must be the case that for <tt>R</tt> reduces, there are <tt>R-1</tt>
+   * keys in the SequenceFile.
+   * @deprecated Use 
+   * {@link #setPartitionFile(Configuration, Path)}
+   * instead
+   */
+  @Deprecated
+  public static void setPartitionFile(JobConf job, Path p) {
+    org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner.
+            setPartitionFile(job, p);
+  }
+
+  /**
+   * Get the path to the SequenceFile storing the sorted partition keyset.
+   * @see #setPartitionFile(JobConf,Path)
+   * @deprecated Use 
+   * {@link #getPartitionFile(Configuration)}
+   * instead
+   */
+  @Deprecated
+  public static String getPartitionFile(JobConf job) {
+    return org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner.
+            getPartitionFile(job);
+  }
 }

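Since the mapred statics above now just delegate, callers can migrate to the new-API entry points directly; for example (partition file path illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;

    public class PartitionFileSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // New-API calls that the deprecated JobConf statics delegate to.
        TotalOrderPartitioner.setPartitionFile(conf, new Path("/tmp/_partitions"));
        System.out.println(TotalOrderPartitioner.getPartitionFile(conf));
      }
    }
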
Modified: hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobCounter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobCounter.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobCounter.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobCounter.java Thu Oct 17 05:32:42 2013
@@ -34,9 +34,13 @@ public enum JobCounter {
   OTHER_LOCAL_MAPS,
   DATA_LOCAL_MAPS,
   RACK_LOCAL_MAPS,
+  @Deprecated
   SLOTS_MILLIS_MAPS,
+  @Deprecated
   SLOTS_MILLIS_REDUCES,
+  @Deprecated
   FALLOW_SLOTS_MILLIS_MAPS,
+  @Deprecated
   FALLOW_SLOTS_MILLIS_REDUCES,
   TOTAL_LAUNCHED_UBERTASKS,
   NUM_UBER_SUBMAPS,


