ScopedFlock: Refactor it to be a subclass of FdFile.

Makes callers cleaner, since they only have to deal with regular File
objects that they know will be locked for the duration of their
existence. Prevents issues and other clunky code related to acquiring
and releasing locks.
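
A minimal before/after sketch of the caller-side pattern (the path and
UseProfile names below are hypothetical stand-ins; the locking API is
the one introduced by this change):

  // Before: callers held a ScopedFlock plus a separate File, and had to
  // go through Init() / GetFile():
  //   ScopedFlock flock;
  //   std::string error;
  //   if (flock.Init(path, O_RDONLY, /* block */ true, &error)) {
  //     UseProfile(flock.GetFile()->Fd());
  //   }
  //
  // After: LockedFile derives from FdFile, so the ScopedFlock (a
  // unique_ptr whose deleter unlocks and closes without flushing) is
  // used like any other File.
  std::string error;
  ScopedFlock profile =
      LockedFile::Open(path, O_RDONLY, /* block */ true, &error);
  if (profile.get() == nullptr) {
    LOG(WARNING) << "Could not lock " << path << ": " << error;
    return false;
  }
  UseProfile(profile->Fd());
  // The lock is released and the fd closed when 'profile' goes out of scope.
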
Test: scoped_flock_test, test_art_host
Bug: 36369345
Change-Id: I2c9644e448acde6ddac472d88108c7d9a4e1a892
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index b88fe09..2e8d193 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2153,30 +2153,27 @@
// cleaning up before that (e.g. the oat writers are created before the
// runtime).
profile_compilation_info_.reset(new ProfileCompilationInfo());
- ScopedFlock flock;
- bool success = true;
+ ScopedFlock profile_file;
std::string error;
if (profile_file_fd_ != -1) {
- // The file doesn't need to be flushed so don't check the usage.
- // Pass a bogus path so that we can easily attribute any reported error.
- File file(profile_file_fd_, "profile", /*check_usage*/ false, /*read_only_mode*/ true);
- if (flock.Init(&file, &error)) {
- success = profile_compilation_info_->Load(profile_file_fd_);
- }
+ profile_file = LockedFile::DupOf(profile_file_fd_, "profile",
+ true /* read_only_mode */, &error);
} else if (profile_file_ != "") {
- if (flock.Init(profile_file_.c_str(), O_RDONLY, /* block */ true, &error)) {
- success = profile_compilation_info_->Load(flock.GetFile()->Fd());
- }
- }
- if (!error.empty()) {
- LOG(WARNING) << "Cannot lock profiles: " << error;
+ profile_file = LockedFile::Open(profile_file_.c_str(), O_RDONLY, true, &error);
}
- if (!success) {
+ // Return early if we're unable to obtain a lock on the profile.
+ if (profile_file.get() == nullptr) {
+ LOG(ERROR) << "Cannot lock profiles: " << error;
+ return false;
+ }
+
+ if (!profile_compilation_info_->Load(profile_file->Fd())) {
profile_compilation_info_.reset(nullptr);
+ return false;
}
- return success;
+ return true;
}
private:
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index ec3481b..848eb8d 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -304,8 +304,10 @@
TimingLogger::ScopedTiming t("Writing image File", timings_);
std::string error_msg;
- ScopedFlock img_flock;
- img_flock.Init(out, &error_msg);
+  // No error checking here: this is best effort. The locking may or may not
+  // succeed, and we don't really care either way.
+ ScopedFlock img_flock = LockedFile::DupOf(out->Fd(), out->GetPath(),
+ true /* read_only_mode */, &error_msg);
CHECK(image_ != nullptr);
CHECK(out != nullptr);
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index b9a85bc..c238f0d 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -33,7 +33,7 @@
ProfileCompilationInfo info;
// Load the reference profile.
- if (!info.Load(reference_profile_file.GetFile()->Fd())) {
+ if (!info.Load(reference_profile_file->Fd())) {
LOG(WARNING) << "Could not load reference profile file";
return kErrorBadProfiles;
}
@@ -45,7 +45,7 @@
// Merge all current profiles.
for (size_t i = 0; i < profile_files.size(); i++) {
ProfileCompilationInfo cur_info;
- if (!cur_info.Load(profile_files[i].GetFile()->Fd())) {
+ if (!cur_info.Load(profile_files[i]->Fd())) {
LOG(WARNING) << "Could not load profile file at index " << i;
return kErrorBadProfiles;
}
@@ -62,11 +62,11 @@
}
// We were successful in merging all profile information. Update the reference profile.
- if (!reference_profile_file.GetFile()->ClearContent()) {
+ if (!reference_profile_file->ClearContent()) {
PLOG(WARNING) << "Could not clear reference profile file";
return kErrorIO;
}
- if (!info.Save(reference_profile_file.GetFile()->Fd())) {
+ if (!info.Save(reference_profile_file->Fd())) {
LOG(WARNING) << "Could not save reference profile file";
return kErrorIO;
}
@@ -74,26 +74,15 @@
return kCompile;
}
-static bool InitFlock(const std::string& filename, ScopedFlock& flock, std::string* error) {
- return flock.Init(filename.c_str(), O_RDWR, /* block */ true, error);
-}
-
-static bool InitFlock(int fd, ScopedFlock& flock, std::string* error) {
- DCHECK_GE(fd, 0);
- // We do not own the descriptor, so disable auto-close and don't check usage.
- File file(fd, false);
- file.DisableAutoClose();
- return flock.Init(&file, error);
-}
-
-class ScopedCollectionFlock {
+class ScopedFlockList {
public:
- explicit ScopedCollectionFlock(size_t size) : flocks_(size) {}
+ explicit ScopedFlockList(size_t size) : flocks_(size) {}
// Will block until all the locks are acquired.
bool Init(const std::vector<std::string>& filenames, /* out */ std::string* error) {
for (size_t i = 0; i < filenames.size(); i++) {
- if (!InitFlock(filenames[i], flocks_[i], error)) {
+ flocks_[i] = LockedFile::Open(filenames[i].c_str(), O_RDWR, /* block */ true, error);
+ if (flocks_[i].get() == nullptr) {
*error += " (index=" + std::to_string(i) + ")";
return false;
}
@@ -105,7 +94,9 @@
bool Init(const std::vector<int>& fds, /* out */ std::string* error) {
for (size_t i = 0; i < fds.size(); i++) {
DCHECK_GE(fds[i], 0);
- if (!InitFlock(fds[i], flocks_[i], error)) {
+ flocks_[i] = LockedFile::DupOf(fds[i], "profile-file",
+ true /* read_only_mode */, error);
+ if (flocks_[i].get() == nullptr) {
*error += " (index=" + std::to_string(i) + ")";
return false;
}
@@ -123,39 +114,47 @@
const std::vector<int>& profile_files_fd,
int reference_profile_file_fd) {
DCHECK_GE(reference_profile_file_fd, 0);
+
std::string error;
- ScopedCollectionFlock profile_files_flocks(profile_files_fd.size());
- if (!profile_files_flocks.Init(profile_files_fd, &error)) {
+ ScopedFlockList profile_files(profile_files_fd.size());
+ if (!profile_files.Init(profile_files_fd, &error)) {
LOG(WARNING) << "Could not lock profile files: " << error;
return kErrorCannotLock;
}
- ScopedFlock reference_profile_file_flock;
- if (!InitFlock(reference_profile_file_fd, reference_profile_file_flock, &error)) {
+
+ // The reference_profile_file is opened in read/write mode because it's
+ // cleared after processing.
+ ScopedFlock reference_profile_file = LockedFile::DupOf(reference_profile_file_fd,
+ "reference-profile",
+ false /* read_only_mode */,
+ &error);
+ if (reference_profile_file.get() == nullptr) {
LOG(WARNING) << "Could not lock reference profiled files: " << error;
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files_flocks.Get(),
- reference_profile_file_flock);
+ return ProcessProfilesInternal(profile_files.Get(), reference_profile_file);
}
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<std::string>& profile_files,
const std::string& reference_profile_file) {
std::string error;
- ScopedCollectionFlock profile_files_flocks(profile_files.size());
- if (!profile_files_flocks.Init(profile_files, &error)) {
+
+ ScopedFlockList profile_files_list(profile_files.size());
+ if (!profile_files_list.Init(profile_files, &error)) {
LOG(WARNING) << "Could not lock profile files: " << error;
return kErrorCannotLock;
}
- ScopedFlock reference_profile_file_flock;
- if (!InitFlock(reference_profile_file, reference_profile_file_flock, &error)) {
+
+ ScopedFlock locked_reference_profile_file = LockedFile::Open(
+ reference_profile_file.c_str(), O_RDWR, /* block */ true, &error);
+ if (locked_reference_profile_file.get() == nullptr) {
LOG(WARNING) << "Could not lock reference profile files: " << error;
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files_flocks.Get(),
- reference_profile_file_flock);
+ return ProcessProfilesInternal(profile_files_list.Get(), locked_reference_profile_file);
}
} // namespace art
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index 862f0d0..b8df689 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -28,46 +28,39 @@
using android::base::StringPrintf;
-bool ScopedFlock::Init(const char* filename, std::string* error_msg) {
- return Init(filename, O_CREAT | O_RDWR, true, error_msg);
+/* static */ ScopedFlock LockedFile::Open(const char* filename, std::string* error_msg) {
+ return Open(filename, O_CREAT | O_RDWR, true, error_msg);
}
-bool ScopedFlock::Init(const char* filename, int flags, bool block, std::string* error_msg) {
- return Init(filename, flags, block, true, error_msg);
-}
-
-bool ScopedFlock::Init(const char* filename,
- int flags,
- bool block,
- bool flush_on_close,
- std::string* error_msg) {
- flush_on_close_ = flush_on_close;
+/* static */ ScopedFlock LockedFile::Open(const char* filename, int flags, bool block,
+ std::string* error_msg) {
while (true) {
- if (file_.get() != nullptr) {
- UNUSED(file_->FlushCloseOrErase()); // Ignore result.
+    // NOTE: We don't check usage here because the ScopedFlock should *never* be
+    // responsible for flushing its underlying FD. Its only purpose is to acquire
+    // a lock; the unlock / close happens in the corresponding destructor.
+    // Callers should explicitly flush files they're writing to if that is the
+    // desired behaviour.
+ std::unique_ptr<File> file(OS::OpenFileWithFlags(filename, flags, false /* check_usage */));
+ if (file.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
+ return nullptr;
}
- bool check_usage = flush_on_close; // Check usage only if we need to flush on close.
- file_.reset(OS::OpenFileWithFlags(filename, flags, check_usage));
- if (file_.get() == nullptr) {
- *error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
- return false;
- }
int operation = block ? LOCK_EX : (LOCK_EX | LOCK_NB);
- int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), operation));
+ int flock_result = TEMP_FAILURE_RETRY(flock(file->Fd(), operation));
if (flock_result == EWOULDBLOCK) {
// File is locked by someone else and we are required not to block;
- return false;
+ return nullptr;
}
if (flock_result != 0) {
*error_msg = StringPrintf("Failed to lock file '%s': %s", filename, strerror(errno));
- return false;
+ return nullptr;
}
struct stat fstat_stat;
- int fstat_result = TEMP_FAILURE_RETRY(fstat(file_->Fd(), &fstat_stat));
+ int fstat_result = TEMP_FAILURE_RETRY(fstat(file->Fd(), &fstat_stat));
if (fstat_result != 0) {
*error_msg = StringPrintf("Failed to fstat file '%s': %s", filename, strerror(errno));
- return false;
+ return nullptr;
}
struct stat stat_stat;
int stat_result = TEMP_FAILURE_RETRY(stat(filename, &stat_stat));
@@ -80,7 +73,7 @@
// Note that in theory we could race with someone here for a long time and end up retrying
// over and over again. This potential behavior does not fit well in the non-blocking
     // semantics. Thus, if we are not required to block, return failure when racing.
- return false;
+ return nullptr;
}
}
if (fstat_stat.st_dev != stat_stat.st_dev || fstat_stat.st_ino != stat_stat.st_ino) {
@@ -89,61 +82,47 @@
continue;
} else {
// See comment above.
- return false;
+ return nullptr;
}
}
- return true;
+
+ return ScopedFlock(new LockedFile(std::move((*file.get()))));
}
}
-bool ScopedFlock::Init(File* file, std::string* error_msg) {
- flush_on_close_ = true;
- file_.reset(new File(dup(file->Fd()), file->GetPath(), file->CheckUsage(), file->ReadOnlyMode()));
- if (file_->Fd() == -1) {
- file_.reset();
+ScopedFlock LockedFile::DupOf(const int fd, const std::string& path,
+ const bool read_only_mode, std::string* error_msg) {
+  // NOTE: We don't check usage here because the ScopedFlock should *never* be
+  // responsible for flushing its underlying FD. Its only purpose is to acquire
+  // a lock; the unlock / close happens in the corresponding destructor.
+  // Callers should explicitly flush files they're writing to if that is the
+  // desired behaviour.
+ ScopedFlock locked_file(
+ new LockedFile(dup(fd), path, false /* check_usage */, read_only_mode));
+ if (locked_file->Fd() == -1) {
*error_msg = StringPrintf("Failed to duplicate open file '%s': %s",
- file->GetPath().c_str(), strerror(errno));
- return false;
+ locked_file->GetPath().c_str(), strerror(errno));
+ return nullptr;
}
- if (0 != TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_EX))) {
- file_.reset();
+ if (0 != TEMP_FAILURE_RETRY(flock(locked_file->Fd(), LOCK_EX))) {
*error_msg = StringPrintf(
- "Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
- return false;
+ "Failed to lock file '%s': %s", locked_file->GetPath().c_str(), strerror(errno));
+ return nullptr;
}
- return true;
+
+ return locked_file;
}
-File* ScopedFlock::GetFile() const {
- CHECK(file_.get() != nullptr);
- return file_.get();
-}
-
-bool ScopedFlock::HasFile() {
- return file_.get() != nullptr;
-}
-
-ScopedFlock::ScopedFlock() : flush_on_close_(true) { }
-
-ScopedFlock::~ScopedFlock() {
- if (file_.get() != nullptr) {
- int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
+void LockedFile::ReleaseLock() {
+ if (this->Fd() != -1) {
+ int flock_result = TEMP_FAILURE_RETRY(flock(this->Fd(), LOCK_UN));
if (flock_result != 0) {
// Only printing a warning is okay since this is only used with either:
// 1) a non-blocking Init call, or
     // 2) as a part of a separate binary (e.g. dex2oat) which has its own timeout logic to prevent
// deadlocks.
// This means we can be sure that the warning won't cause a deadlock.
- PLOG(WARNING) << "Unable to unlock file " << file_->GetPath();
- }
- int close_result = -1;
- if (file_->ReadOnlyMode() || !flush_on_close_) {
- close_result = file_->Close();
- } else {
- close_result = file_->FlushCloseOrErase();
- }
- if (close_result != 0) {
- PLOG(WARNING) << "Could not close scoped file lock file.";
+ PLOG(WARNING) << "Unable to unlock file " << this->GetPath();
}
}
}
diff --git a/runtime/base/scoped_flock.h b/runtime/base/scoped_flock.h
index a3a320f..1b933c0 100644
--- a/runtime/base/scoped_flock.h
+++ b/runtime/base/scoped_flock.h
@@ -20,63 +20,68 @@
#include <memory>
#include <string>
+#include "android-base/unique_fd.h"
+
+#include "base/logging.h"
#include "base/macros.h"
+#include "base/unix_file/fd_file.h"
#include "os.h"
namespace art {
-// A scoped file-lock implemented using flock. The file is locked by calling the Init function and
-// is released during destruction. Note that failing to unlock the file only causes a warning to be
-// printed. Users should take care that this does not cause potential deadlocks.
-//
-// Only printing a warning on unlock failure is okay since this is only used with either:
-// 1) a non-blocking Init call, or
-// 2) as a part of a seperate binary (eg dex2oat) which has it's own timeout logic to prevent
-// deadlocks.
-// This means we can be sure that the warning won't cause a deadlock.
-class ScopedFlock {
- public:
- ScopedFlock();
+class LockedFile;
+class LockedFileCloseNoFlush;
+// A scoped, locked File object that releases its lock and calls Close (without flushing) on destruction.
+typedef std::unique_ptr<LockedFile, LockedFileCloseNoFlush> ScopedFlock;
+
+class LockedFile : public unix_file::FdFile {
+ public:
// Attempts to acquire an exclusive file lock (see flock(2)) on the file
// at filename, and blocks until it can do so.
//
- // Returns true if the lock could be acquired, or false if an error occurred.
// It is an error if its inode changed (usually due to a new file being
// created at the same path) between attempts to lock it. In blocking mode,
// locking will be retried if the file changed. In non-blocking mode, false
// is returned and no attempt is made to re-acquire the lock.
//
- // The argument `flush_on_close` controls whether or not the file
- // will be explicitly flushed before close.
- //
// The file is opened with the provided flags.
- bool Init(const char* filename,
- int flags,
- bool block,
- bool flush_on_close,
- std::string* error_msg);
- // Calls Init(filename, flags, block, true, error_msg);
- bool Init(const char* filename, int flags, bool block, std::string* error_msg);
- // Calls Init(filename, O_CREAT | O_RDWR, true, errror_msg)
- bool Init(const char* filename, std::string* error_msg);
+ static ScopedFlock Open(const char* filename, int flags, bool block,
+ std::string* error_msg);
+
+  // Calls Open(filename, O_CREAT | O_RDWR, true, error_msg)
+ static ScopedFlock Open(const char* filename, std::string* error_msg);
+
// Attempt to acquire an exclusive file lock (see flock(2)) on 'file'.
// Returns true if the lock could be acquired or false if an error
   // occurred.
- bool Init(File* file, std::string* error_msg);
+ static ScopedFlock DupOf(const int fd, const std::string& path,
+ const bool read_only_mode, std::string* error_message);
- // Returns the (locked) file associated with this instance.
- File* GetFile() const;
-
- // Returns whether a file is held.
- bool HasFile();
-
- ~ScopedFlock();
+ // Release a lock held on this file, if any.
+ void ReleaseLock();
private:
- std::unique_ptr<File> file_;
- bool flush_on_close_;
- DISALLOW_COPY_AND_ASSIGN(ScopedFlock);
+  // Constructors should not be invoked directly; use one of the factory
+  // methods instead.
+ explicit LockedFile(FdFile&& other) : FdFile(std::move(other)) {
+ }
+
+  // Constructors should not be invoked directly; use one of the factory
+  // methods instead.
+ LockedFile(int fd, const std::string& path, bool check_usage, bool read_only_mode)
+ : FdFile(fd, path, check_usage, read_only_mode) {
+ }
+};
+
+class LockedFileCloseNoFlush {
+ public:
+ void operator()(LockedFile* ptr) {
+ ptr->ReleaseLock();
+ UNUSED(ptr->Close());
+
+ delete ptr;
+ }
};
} // namespace art
diff --git a/runtime/base/scoped_flock_test.cc b/runtime/base/scoped_flock_test.cc
index 1fa7a12..1b6caaf 100644
--- a/runtime/base/scoped_flock_test.cc
+++ b/runtime/base/scoped_flock_test.cc
@@ -30,11 +30,33 @@
   // to each other, so attempting to query locks set by flock using
   // fcntl(,F_GETLK,) will not work. See kernel doc at
// Documentation/filesystems/locks.txt.
- ScopedFlock file_lock;
- ASSERT_TRUE(file_lock.Init(scratch_file.GetFilename().c_str(),
- &error_msg));
+ {
+ ScopedFlock file_lock = LockedFile::Open(scratch_file.GetFilename().c_str(),
+ &error_msg);
+ ASSERT_TRUE(file_lock.get() != nullptr);
- ASSERT_FALSE(file_lock.Init("/guaranteed/not/to/exist", &error_msg));
+ // Attempt to acquire a second lock on the same file. This must fail.
+ ScopedFlock second_lock = LockedFile::Open(scratch_file.GetFilename().c_str(),
+ O_RDONLY,
+ /* block */ false,
+ &error_msg);
+ ASSERT_TRUE(second_lock.get() == nullptr);
+ ASSERT_TRUE(!error_msg.empty());
+ }
+
+ {
+    // Attempt to reacquire the lock once the first lock has been released; this
+    // must succeed.
+ ScopedFlock file_lock = LockedFile::Open(scratch_file.GetFilename().c_str(),
+ &error_msg);
+ ASSERT_TRUE(file_lock.get() != nullptr);
+ }
+
+ {
+ ScopedFlock file_lock = LockedFile::Open("/will/not/exist",
+ &error_msg);
+ ASSERT_TRUE(file_lock.get() == nullptr);
+ }
}
} // namespace art
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 9da2876..1bf9285 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -482,21 +482,22 @@
bool validate_oat_file,
std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // Note that we must not use the file descriptor associated with
- // ScopedFlock::GetFile to Init the image file. We want the file
- // descriptor (and the associated exclusive lock) to be released when
- // we leave Create.
- ScopedFlock image_lock;
// Should this be a RDWR lock? This is only a defensive measure, as at
// this point the image should exist.
// However, only the zygote can write into the global dalvik-cache, so
// restrict to zygote processes, or any process that isn't using
// /data/dalvik-cache (which we assume to be allowed to write there).
const bool rw_lock = is_zygote || !is_global_cache;
- image_lock.Init(image_filename.c_str(),
- rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
- true /* block */,
- error_msg);
+
+  // Note that we must not use the file descriptor associated with the
+  // image file lock to Init the image file. We want the file
+  // descriptor (and the associated exclusive lock) to be released when
+  // we leave Create.
+ ScopedFlock image = LockedFile::Open(image_filename.c_str(),
+ rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
+ true /* block */,
+ error_msg);
+
VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
<< image_location;
// If we are in /system we can assume the image is good. We can also
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index a67fb38..580be04 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -165,18 +165,20 @@
bool ProfileCompilationInfo::Load(const std::string& filename, bool clear_if_invalid) {
ScopedTrace trace(__PRETTY_FUNCTION__);
- ScopedFlock flock;
std::string error;
int flags = O_RDWR | O_NOFOLLOW | O_CLOEXEC;
// There's no need to fsync profile data right away. We get many chances
// to write it again in case something goes wrong. We can rely on a simple
   // close(), no sync, and let the kernel decide when to write to disk.
- if (!flock.Init(filename.c_str(), flags, /*block*/false, /*flush_on_close*/false, &error)) {
+ ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
+ /*block*/false, &error);
+
+ if (profile_file.get() == nullptr) {
LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
return false;
}
- int fd = flock.GetFile()->Fd();
+ int fd = profile_file->Fd();
ProfileLoadSatus status = LoadInternal(fd, &error);
if (status == kProfileLoadSuccess) {
@@ -187,7 +189,7 @@
((status == kProfileLoadVersionMismatch) || (status == kProfileLoadBadData))) {
LOG(WARNING) << "Clearing bad or obsolete profile data from file "
<< filename << ": " << error;
- if (flock.GetFile()->ClearContent()) {
+ if (profile_file->ClearContent()) {
return true;
} else {
PLOG(WARNING) << "Could not clear profile file: " << filename;
@@ -201,21 +203,22 @@
bool ProfileCompilationInfo::Save(const std::string& filename, uint64_t* bytes_written) {
ScopedTrace trace(__PRETTY_FUNCTION__);
- ScopedFlock flock;
std::string error;
int flags = O_WRONLY | O_NOFOLLOW | O_CLOEXEC;
// There's no need to fsync profile data right away. We get many chances
// to write it again in case something goes wrong. We can rely on a simple
   // close(), no sync, and let the kernel decide when to write to disk.
- if (!flock.Init(filename.c_str(), flags, /*block*/false, /*flush_on_close*/false, &error)) {
+ ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
+ /*block*/false, &error);
+ if (profile_file.get() == nullptr) {
LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
return false;
}
- int fd = flock.GetFile()->Fd();
+ int fd = profile_file->Fd();
// We need to clear the data because we don't support appending to the profiles yet.
- if (!flock.GetFile()->ClearContent()) {
+ if (!profile_file->ClearContent()) {
PLOG(WARNING) << "Could not clear profile file: " << filename;
return false;
}
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 2e2e8c3..4820feb 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -141,8 +141,8 @@
OatFileAssistant::~OatFileAssistant() {
// Clean up the lock file.
- if (flock_.HasFile()) {
- unlink(flock_.GetFile()->GetPath().c_str());
+ if (flock_.get() != nullptr) {
+ unlink(flock_->GetPath().c_str());
}
}
@@ -165,7 +165,7 @@
bool OatFileAssistant::Lock(std::string* error_msg) {
CHECK(error_msg != nullptr);
- CHECK(!flock_.HasFile()) << "OatFileAssistant::Lock already acquired";
+ CHECK(flock_.get() == nullptr) << "OatFileAssistant::Lock already acquired";
// Note the lock will only succeed for secondary dex files and in test
// environment.
@@ -179,7 +179,8 @@
// to generate oat files anyway.
std::string lock_file_name = dex_location_ + "." + GetInstructionSetString(isa_) + ".flock";
- if (!flock_.Init(lock_file_name.c_str(), error_msg)) {
+ flock_ = LockedFile::Open(lock_file_name.c_str(), error_msg);
+ if (flock_.get() == nullptr) {
unlink(lock_file_name.c_str());
return false;
}