/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.client;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.EnumSet;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.security.AccessControlException;

/**
 * The public API for performing administrative functions on HDFS. Those writing
 * applications against HDFS should prefer this interface to directly accessing
 * functionality in DistributedFileSystem or DFSClient.
 *
 * Note that this is distinct from the similarly-named {@link DFSAdmin}, which
 * is a class that provides the functionality for the CLI
 * {@code hdfs dfsadmin ...} commands.
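 *
 * Illustrative usage, assuming {@code fs.defaultFS} points at an HDFS
 * cluster (the snapshot path below is a placeholder):
 * <pre>{@code
 *   Configuration conf = new Configuration();
 *   HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
 *   admin.allowSnapshot(new Path("/data"));
 * }</pre>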
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HdfsAdmin {

  private DistributedFileSystem dfs;
  private static final FsPermission TRASH_PERMISSION = new FsPermission(
      FsAction.ALL, FsAction.ALL, FsAction.ALL, true);

  /**
   * Create a new HdfsAdmin client.
   *
   * @param uri the unique URI of the HDFS file system to administer
   * @param conf configuration
   * @throws IOException in the event the file system could not be created
   */
  public HdfsAdmin(URI uri, Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(uri, conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalArgumentException("'" + uri + "' is not an HDFS URI.");
    } else {
      dfs = (DistributedFileSystem)fs;
    }
  }

  /**
   * Set the namespace quota (count of files, directories, and sym links) for a
   * directory.
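   *
   * Illustrative example (the path and limit are placeholders), assuming
   * {@code admin} is an {@link HdfsAdmin} instance:
   * <pre>{@code
   *   // Allow at most one million namespace objects under /user/project.
   *   admin.setQuota(new Path("/user/project"), 1000000L);
   * }</pre>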
   *
   * @param src the path to set the quota for
   * @param quota the value to set for the quota
   * @throws IOException in the event of error
   */
  public void setQuota(Path src, long quota) throws IOException {
    dfs.setQuota(src, quota, HdfsConstants.QUOTA_DONT_SET);
  }

  /**
   * Clear the namespace quota (count of files, directories and sym links) for a
   * directory.
   *
   * @param src the path to clear the quota of
   * @throws IOException in the event of error
   */
  public void clearQuota(Path src) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
  }

  /**
   * Set the storage space quota (size of files) for a directory. Note that
   * directories and sym links do not occupy storage space.
   *
   * @param src the path to set the space quota of
   * @param spaceQuota the value to set for the space quota
   * @throws IOException in the event of error
   */
  public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota);
  }

  /**
   * Clear the storage space quota (size of files) for a directory. Note that
   * directories and sym links do not occupy storage space.
   *
   * @param src the path to clear the space quota of
   * @throws IOException in the event of error
   */
  public void clearSpaceQuota(Path src) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
  }

  /**
   * Set the quota by storage type for a directory. Note that
   * directories and sym links do not occupy storage type quota.
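   *
   * Illustrative example (the path, storage type and limit are placeholders),
   * assuming {@code admin} is an {@link HdfsAdmin} instance:
   * <pre>{@code
   *   // Limit SSD usage under /hot to 10 GB.
   *   admin.setQuotaByStorageType(new Path("/hot"), StorageType.SSD,
   *       10L * 1024 * 1024 * 1024);
   * }</pre>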
   *
   * @param src the target directory to set the quota by storage type
   * @param type the storage type to set for quota by storage type
   * @param quota the value to set for quota by storage type
   * @throws IOException in the event of error
   */
  public void setQuotaByStorageType(Path src, StorageType type, long quota)
      throws IOException {
    dfs.setQuotaByStorageType(src, type, quota);
  }

  /**
   * Clear the space quota by storage type for a directory. Note that
   * directories and sym links do not occupy storage type quota.
   *
   * @param src the target directory to clear the quota by storage type
   * @param type the storage type to clear for quota by storage type
   * @throws IOException in the event of error
   */
  public void clearQuotaByStorageType(Path src, StorageType type)
      throws IOException {
    dfs.setQuotaByStorageType(src, type, HdfsConstants.QUOTA_RESET);
  }

  /**
   * Allow snapshot on a directory.
   * @param path The path of the directory where snapshots will be taken.
   * @throws IOException in the event of error
   */
  public void allowSnapshot(Path path) throws IOException {
    dfs.allowSnapshot(path);
  }

  /**
   * Disallow snapshot on a directory.
   * @param path The path of the snapshottable directory.
   * @throws IOException in the event of error
   */
  public void disallowSnapshot(Path path) throws IOException {
    dfs.disallowSnapshot(path);
  }

  /**
   * Add a new CacheDirectiveInfo.
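   *
   * Illustrative example (the path, pool name and replication factor are
   * placeholders), assuming {@code admin} is an {@link HdfsAdmin} instance
   * and the cache pool already exists:
   * <pre>{@code
   *   CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
   *       .setPath(new Path("/warehouse/hot-table"))
   *       .setPool("analytics-pool")
   *       .setReplication((short) 2)
   *       .build();
   *   long id = admin.addCacheDirective(directive,
   *       EnumSet.noneOf(CacheFlag.class));
   * }</pre>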
   *
   * @param info Information about a directive to add.
   * @param flags {@link CacheFlag}s to use for this operation.
   * @return the ID of the directive that was created.
   * @throws IOException if the directive could not be added
   */
  public long addCacheDirective(CacheDirectiveInfo info,
      EnumSet<CacheFlag> flags) throws IOException {
    return dfs.addCacheDirective(info, flags);
  }

  /**
   * Modify a CacheDirective.
   *
   * @param info Information about the directive to modify. You must set the ID
   *          to indicate which CacheDirective you want to modify.
   * @param flags {@link CacheFlag}s to use for this operation.
   * @throws IOException if the directive could not be modified
   */
  public void modifyCacheDirective(CacheDirectiveInfo info,
      EnumSet<CacheFlag> flags) throws IOException {
    dfs.modifyCacheDirective(info, flags);
  }

  /**
   * Remove a CacheDirective.
   *
   * @param id identifier of the CacheDirectiveInfo to remove
   * @throws IOException if the directive could not be removed
   */
  public void removeCacheDirective(long id)
      throws IOException {
    dfs.removeCacheDirective(id);
  }

  /**
   * List cache directives. Incrementally fetches results from the server.
   *
   * @param filter Filter parameters to use when listing the directives, null to
   *               list all directives visible to us.
   * @return A RemoteIterator which returns CacheDirectiveEntry objects.
   */
  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
      CacheDirectiveInfo filter) throws IOException {
    return dfs.listCacheDirectives(filter);
  }

  /**
   * Add a cache pool.
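   *
   * Illustrative example (the pool name is a placeholder), assuming
   * {@code admin} is an {@link HdfsAdmin} instance:
   * <pre>{@code
   *   admin.addCachePool(new CachePoolInfo("analytics-pool"));
   * }</pre>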
   *
   * @param info
   *          The request to add a cache pool.
   * @throws IOException
   *          If the request could not be completed.
   */
  public void addCachePool(CachePoolInfo info) throws IOException {
    dfs.addCachePool(info);
  }

  /**
   * Modify an existing cache pool.
   *
   * @param info
   *          The request to modify a cache pool.
   * @throws IOException
   *          If the request could not be completed.
   */
  public void modifyCachePool(CachePoolInfo info) throws IOException {
    dfs.modifyCachePool(info);
  }

  /**
   * Remove a cache pool.
   *
   * @param poolName
   *          Name of the cache pool to remove.
   * @throws IOException
   *          if the cache pool did not exist, or could not be removed.
   */
  public void removeCachePool(String poolName) throws IOException {
    dfs.removeCachePool(poolName);
  }

  /**
   * List all cache pools.
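   *
   * Illustrative example of draining the iterator, assuming {@code admin} is
   * an {@link HdfsAdmin} instance:
   * <pre>{@code
   *   RemoteIterator<CachePoolEntry> pools = admin.listCachePools();
   *   while (pools.hasNext()) {
   *     CachePoolEntry entry = pools.next();
   *     System.out.println(entry.getInfo().getPoolName());
   *   }
   * }</pre>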
   *
   * @return A remote iterator from which you can get CachePoolEntry objects.
   *          Requests will be made as needed.
   * @throws IOException
   *          If there was an error listing cache pools.
   */
  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
    return dfs.listCachePools();
  }

  /**
   * Create an encryption zone rooted at an empty existing directory, using the
   * specified encryption key. An encryption zone has an associated encryption
   * key used when reading and writing files within the zone.
   *
   * @param path    The path of the root of the encryption zone. Must refer to
   *                an empty, existing directory.
   * @param keyName Name of key available at the KeyProvider.
   * @throws IOException            if there was a general IO exception
   * @throws AccessControlException if the caller does not have access to path
   * @throws FileNotFoundException  if the path does not exist
   * @deprecated use {@link #createEncryptionZone(Path, String, EnumSet)}
   *             instead.
   */
  @Deprecated
  public void createEncryptionZone(Path path, String keyName)
      throws IOException, AccessControlException, FileNotFoundException {
    dfs.createEncryptionZone(path, keyName);
  }

  /**
   * Create an encryption zone rooted at an empty existing directory, using the
   * specified encryption key. An encryption zone has an associated encryption
   * key used when reading and writing files within the zone.
   *
   * Additional options, such as provisioning the trash directory, can be
   * specified using {@link CreateEncryptionZoneFlag} flags.
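   *
   * Illustrative example (the path and key name are placeholders; the key is
   * assumed to already exist at the KeyProvider), assuming {@code admin} is
   * an {@link HdfsAdmin} instance:
   * <pre>{@code
   *   admin.createEncryptionZone(new Path("/secure"), "myKey",
   *       EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
   * }</pre>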
   *
   * @param path    The path of the root of the encryption zone. Must refer to
   *                an empty, existing directory.
   * @param keyName Name of key available at the KeyProvider.
   * @param flags   flags for this operation.
   * @throws IOException            if there was a general IO exception
   * @throws AccessControlException if the caller does not have access to path
   * @throws FileNotFoundException  if the path does not exist
   * @throws HadoopIllegalArgumentException if the flags are invalid
   */
  public void createEncryptionZone(Path path, String keyName,
      EnumSet<CreateEncryptionZoneFlag> flags)
      throws IOException, AccessControlException, FileNotFoundException,
      HadoopIllegalArgumentException {
    // Reject conflicting flags up front, so that an invalid combination does
    // not leave a newly created encryption zone behind.
    if (flags.contains(CreateEncryptionZoneFlag.PROVISION_TRASH)
        && flags.contains(CreateEncryptionZoneFlag.NO_TRASH)) {
      throw new HadoopIllegalArgumentException(
          "can not have both PROVISION_TRASH and NO_TRASH flags");
    }
    dfs.createEncryptionZone(path, keyName);
    if (flags.contains(CreateEncryptionZoneFlag.PROVISION_TRASH)) {
      this.provisionEZTrash(path);
    }
  }

  /**
   * Provision a trash directory for a given encryption zone.
   *
   * @param path the root of the encryption zone
   * @throws IOException if the trash directory cannot be created.
   */
  public void provisionEncryptionZoneTrash(Path path) throws IOException {
    this.provisionEZTrash(path);
  }

  /**
   * Get the path of the encryption zone for a given file or directory.
   *
   * @param path The path to get the encryption zone for.
   *
   * @return The EncryptionZone, or null if the path is not in an encryption
   *         zone.
   * @throws IOException            if there was a general IO exception
   * @throws AccessControlException if the caller does not have access to path
   * @throws FileNotFoundException  if the path does not exist
   */
  public EncryptionZone getEncryptionZoneForPath(Path path)
    throws IOException, AccessControlException, FileNotFoundException {
    return dfs.getEZForPath(path);
  }

  /**
   * Returns a RemoteIterator which can be used to list the encryption zones
   * in HDFS. For large numbers of encryption zones, the iterator will fetch
   * the list of zones in a number of small batches.
   * <p/>
   * Since the list is fetched in batches, it does not represent a
   * consistent snapshot of the entire list of encryption zones.
   * <p/>
   * This method can only be called by HDFS superusers.
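   * <p/>
   * Illustrative example of draining the iterator, assuming {@code admin} is
   * an {@link HdfsAdmin} instance created by a superuser:
   * <pre>{@code
   *   RemoteIterator<EncryptionZone> zones = admin.listEncryptionZones();
   *   while (zones.hasNext()) {
   *     EncryptionZone zone = zones.next();
   *     System.out.println(zone.getPath() + " -> " + zone.getKeyName());
   *   }
   * }</pre>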
   */
  public RemoteIterator<EncryptionZone> listEncryptionZones()
      throws IOException {
    return dfs.listEncryptionZones();
  }

  /**
   * Exposes a stream of namesystem events. Only events occurring after the
   * stream is created are available.
   * See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
   * for information on stream usage.
   * See {@link org.apache.hadoop.hdfs.inotify.Event}
   * for information on the available events.
   * <p/>
   * Inotify users may want to tune the following HDFS parameters to
   * ensure that enough extra HDFS edits are saved to support inotify clients
   * that fall behind the current state of the namespace while reading events.
   * The default parameter values should generally be reasonable. If edits are
   * deleted before their corresponding events can be read, clients will see a
   * {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
   * {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
   *
   * It should generally be sufficient to tune these parameters:
   * dfs.namenode.num.extra.edits.retained
   * dfs.namenode.max.extra.edits.segments.retained
   *
   * Parameters that affect the number of created segments and the number of
   * edits that are considered necessary (i.e. that do not count towards the
   * dfs.namenode.num.extra.edits.retained quota):
   * dfs.namenode.checkpoint.period
   * dfs.namenode.checkpoint.txns
   * dfs.namenode.num.checkpoints.retained
   * dfs.ha.log-roll.period
   * <p/>
   * It is recommended that local journaling be configured
   * (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
   * so that edit transfers from the shared journal can be avoided.
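   * <p/>
   * Illustrative sketch of consuming the stream (error handling omitted),
   * assuming {@code admin} is an {@link HdfsAdmin} instance:
   * <pre>{@code
   *   DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
   *   while (true) {
   *     EventBatch batch = stream.take(); // blocks until events are available
   *     for (Event event : batch.getEvents()) {
   *       System.out.println(event.getEventType());
   *     }
   *   }
   * }</pre>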
   *
   * @throws IOException If there was an error obtaining the stream.
   */
  public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
    return dfs.getInotifyEventStream();
  }

  /**
   * A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
   * users who are aware of HDFS edits up to lastReadTxid (e.g. because they
   * have access to an FSImage inclusive of lastReadTxid) and only want to read
   * events after this point.
   */
  public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
      throws IOException {
    return dfs.getInotifyEventStream(lastReadTxid);
  }

  /**
   * Set the specified storage policy on the source path, which may refer to
   * either a directory or a file.
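   *
   * Illustrative example (the path is a placeholder; {@code "COLD"} is one of
   * the built-in HDFS storage policy names), assuming {@code admin} is an
   * {@link HdfsAdmin} instance:
   * <pre>{@code
   *   admin.setStoragePolicy(new Path("/archive/2015"), "COLD");
   * }</pre>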
   *
   * @param src The source path referring to either a directory or a file.
   * @param policyName The name of the storage policy.
   * @throws IOException in the event of error
   */
  public void setStoragePolicy(final Path src, final String policyName)
      throws IOException {
    dfs.setStoragePolicy(src, policyName);
  }

  /**
   * Unset the storage policy set for a given file or directory.
   *
   * @param src file or directory path.
   * @throws IOException in the event of error
   */
  public void unsetStoragePolicy(final Path src) throws IOException {
    dfs.unsetStoragePolicy(src);
  }

  /**
   * Query the effective storage policy for the given file or directory.
   *
   * @param src file or directory path.
   * @return storage policy for the given file or directory.
   * @throws IOException in the event of error
   */
  public BlockStoragePolicySpi getStoragePolicy(final Path src)
      throws IOException {
    return dfs.getStoragePolicy(src);
  }

  /**
   * Retrieve all the storage policies supported by HDFS file system.
   *
   * @return all storage policies supported by HDFS file system.
   * @throws IOException in the event of error
   */
  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
      throws IOException {
    return dfs.getAllStoragePolicies();
  }

  /**
   * Set the specified erasure coding policy on the source path, which must
   * refer to a directory.
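   *
   * Illustrative example (the path is a placeholder), assuming {@code admin}
   * is an {@link HdfsAdmin} instance; passing {@code null} selects the
   * default policy:
   * <pre>{@code
   *   admin.setErasureCodingPolicy(new Path("/ec-data"), null);
   * }</pre>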
   *
   * @param path The source path referring to a directory.
   * @param ecPolicy The erasure coding policy for the directory.
   *                 If null, the default will be used.
   * @throws IOException in the event of error
   */
  public void setErasureCodingPolicy(final Path path,
      final ErasureCodingPolicy ecPolicy) throws IOException {
    dfs.setErasureCodingPolicy(path, ecPolicy);
  }

  /**
   * Get the erasure coding policy information for the specified path.
   *
   * @param path the path of the file or directory to query
   * @return the policy information if the file or directory at the given path
   *          is erasure coded; null otherwise.
   * @throws IOException in the event of error
   */
  public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
      throws IOException {
    return dfs.getErasureCodingPolicy(path);
  }

  /**
   * Get the erasure coding policies supported by HDFS.
   *
   * @return the supported erasure coding policies.
   * @throws IOException in the event of error
   */
  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
    return dfs.getClient().getErasureCodingPolicies();
  }

  private void provisionEZTrash(Path path) throws IOException {
    // make sure the path is an EZ
    EncryptionZone ez = dfs.getEZForPath(path);
    if (ez == null) {
      throw new IllegalArgumentException(path + " is not an encryption zone.");
    }

    String ezPath = ez.getPath();
    if (!path.toString().equals(ezPath)) {
      throw new IllegalArgumentException(path + " is not the root of an " +
          "encryption zone. Do you mean " + ez.getPath() + "?");
    }

    // Check whether the trash directory already exists.
    Path trashPath = new Path(ez.getPath(), FileSystem.TRASH_PREFIX);

    if (dfs.exists(trashPath)) {
      String errMessage = "Will not provision new trash directory for " +
          "encryption zone " + ez.getPath() + ". Path already exists.";
      FileStatus trashFileStatus = dfs.getFileStatus(trashPath);
      if (!trashFileStatus.isDirectory()) {
        errMessage += "\r\n" +
            "Warning: " + trashPath.toString() + " is not a directory";
      }
      if (!trashFileStatus.getPermission().equals(TRASH_PERMISSION)) {
        errMessage += "\r\n" +
            "Warning: the permission of " +
            trashPath.toString() + " is not " + TRASH_PERMISSION;
      }
      throw new IOException(errMessage);
    }

    // Create the trash directory, then set its permission explicitly, since
    // the mode passed to mkdir is subject to the client's umask.
    dfs.mkdir(trashPath, TRASH_PERMISSION);
    dfs.setPermission(trashPath, TRASH_PERMISSION);
  }

}