OLD | NEW |
---|---|
(Empty) | |
1 // Copyright (c) 2014 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/browser/android/url_request_content_job.h" | |
6 | |
7 #include "base/android/content_uri_utils.h" | |
8 #include "base/bind.h" | |
9 #include "base/files/file_util.h" | |
10 #include "base/message_loop/message_loop.h" | |
11 #include "base/task_runner.h" | |
12 #include "net/base/file_stream.h" | |
13 #include "net/base/io_buffer.h" | |
14 #include "net/base/net_errors.h" | |
15 #include "net/http/http_util.h" | |
16 #include "net/url_request/url_request_error_job.h" | |
17 #include "url/gurl.h" | |
18 | |
19 namespace content { | |
20 | |
21 // TODO(qinmin): Refactor this class to reuse the common code in | |
22 // url_request_file_job.cc. | |
// Metadata defaults: the content is treated as absent (|content_exists| ==
// false) and zero-length until FetchMetaInfo() fills in the real values.
URLRequestContentJob::ContentMetaInfo::ContentMetaInfo()
    : content_exists(false),
      content_size(0) {
}
27 | |
// Constructs a job that serves |content_path| (presumably an Android
// content:// URI, given the content_uri_utils usage — confirm with callers).
// |content_task_runner| must allow blocking file operations: both the
// metadata fetch and all FileStream I/O are posted to it.
URLRequestContentJob::URLRequestContentJob(
    net::URLRequest* request,
    net::NetworkDelegate* network_delegate,
    const base::FilePath& content_path,
    const scoped_refptr<base::TaskRunner>& content_task_runner)
    : net::URLRequestJob(request, network_delegate),
      content_path_(content_path),
      stream_(new net::FileStream(content_task_runner)),
      content_task_runner_(content_task_runner),
      remaining_bytes_(0),
      io_pending_(false),
      weak_ptr_factory_(this) {}
40 | |
41 void URLRequestContentJob::Start() { | |
42 ContentMetaInfo* meta_info = new ContentMetaInfo(); | |
43 content_task_runner_->PostTaskAndReply( | |
44 FROM_HERE, | |
45 base::Bind(&URLRequestContentJob::FetchMetaInfo, content_path_, | |
46 base::Unretained(meta_info)), | |
47 base::Bind(&URLRequestContentJob::DidFetchMetaInfo, | |
48 weak_ptr_factory_.GetWeakPtr(), | |
49 base::Owned(meta_info))); | |
50 } | |
51 | |
void URLRequestContentJob::Kill() {
  // Drop the stream first so any in-flight FileStream operation is
  // abandoned, then invalidate weak pointers so pending callbacks
  // (DidFetchMetaInfo/DidOpen/DidSeek/DidRead) become no-ops before the
  // base class tears the job down.
  stream_.reset();
  weak_ptr_factory_.InvalidateWeakPtrs();

  net::URLRequestJob::Kill();
}
58 | |
59 bool URLRequestContentJob::ReadRawData(net::IOBuffer* dest, | |
60 int dest_size, | |
61 int* bytes_read) { | |
62 DCHECK_GT(dest_size, 0); | |
63 DCHECK(bytes_read); | |
64 DCHECK_GE(remaining_bytes_, 0); | |
65 | |
66 if (remaining_bytes_ < dest_size) | |
67 dest_size = static_cast<int>(remaining_bytes_); | |
68 | |
69 // If we should copy zero bytes because |remaining_bytes_| is zero, short | |
70 // circuit here. | |
71 if (!dest_size) { | |
72 *bytes_read = 0; | |
73 return true; | |
74 } | |
75 | |
76 int rv = stream_->Read(dest, | |
77 dest_size, | |
78 base::Bind(&URLRequestContentJob::DidRead, | |
79 weak_ptr_factory_.GetWeakPtr(), | |
80 make_scoped_refptr(dest))); | |
81 if (rv >= 0) { | |
82 // Data is immediately available. | |
83 *bytes_read = rv; | |
84 remaining_bytes_ -= rv; | |
85 io_pending_ = true; | |
no sievers
2014/11/25 23:51:52
Don't you have to set this below only if |rv == ne
qinmin
2014/11/26 00:56:47
oops, fixed.
| |
86 DCHECK_GE(remaining_bytes_, 0); | |
87 return true; | |
88 } | |
89 | |
90 // Otherwise, a read error occured. We may just need to wait... | |
91 if (rv == net::ERR_IO_PENDING) { | |
92 SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0)); | |
93 } else { | |
94 NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, rv)); | |
95 } | |
96 return false; | |
97 } | |
98 | |
99 bool URLRequestContentJob::IsRedirectResponse(GURL* location, | |
100 int* http_status_code) { | |
101 return false; | |
102 } | |
103 | |
104 bool URLRequestContentJob::GetMimeType(std::string* mime_type) const { | |
105 DCHECK(request_); | |
106 if (!meta_info_.mime_type.empty()) { | |
107 *mime_type = meta_info_.mime_type; | |
108 return true; | |
109 } | |
110 return false; | |
111 } | |
112 | |
113 void URLRequestContentJob::SetExtraRequestHeaders( | |
114 const net::HttpRequestHeaders& headers) { | |
115 std::string range_header; | |
116 if (!headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header)) | |
117 return; | |
118 | |
119 // We only care about "Range" header here. | |
120 std::vector<net::HttpByteRange> ranges; | |
121 if (net::HttpUtil::ParseRangeHeader(range_header, &ranges)) { | |
122 if (ranges.size() == 1) { | |
123 byte_range_ = ranges[0]; | |
124 } else { | |
125 // We don't support multiple range requests. | |
126 NotifyDone(net::URLRequestStatus( | |
127 net::URLRequestStatus::FAILED, | |
128 net::ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | |
129 } | |
130 } | |
131 } | |
132 | |
133 URLRequestContentJob::~URLRequestContentJob() {} | |
134 | |
135 void URLRequestContentJob::FetchMetaInfo(const base::FilePath& content_path, | |
136 ContentMetaInfo* meta_info) { | |
137 base::File::Info file_info; | |
138 meta_info->content_exists = base::GetFileInfo(content_path, &file_info); | |
139 if (meta_info->content_exists) { | |
140 meta_info->content_size = file_info.size; | |
141 meta_info->mime_type = base::GetContentUriMimeType(content_path); | |
142 } | |
143 } | |
144 | |
145 void URLRequestContentJob::DidFetchMetaInfo(const ContentMetaInfo* meta_info) { | |
146 meta_info_ = *meta_info; | |
147 | |
148 if (!meta_info_.content_exists) { | |
149 DidOpen(net::ERR_FILE_NOT_FOUND); | |
150 return; | |
151 } | |
152 | |
153 int flags = base::File::FLAG_OPEN | | |
154 base::File::FLAG_READ | | |
155 base::File::FLAG_ASYNC; | |
156 int rv = stream_->Open(content_path_, flags, | |
157 base::Bind(&URLRequestContentJob::DidOpen, | |
158 weak_ptr_factory_.GetWeakPtr())); | |
159 if (rv != net::ERR_IO_PENDING) | |
160 DidOpen(rv); | |
161 } | |
162 | |
163 void URLRequestContentJob::DidOpen(int result) { | |
164 if (result != net::OK) { | |
165 NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result)); | |
166 return; | |
167 } | |
168 | |
169 if (!byte_range_.ComputeBounds(meta_info_.content_size)) { | |
170 NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, | |
171 net::ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | |
172 return; | |
173 } | |
174 | |
175 remaining_bytes_ = byte_range_.last_byte_position() - | |
176 byte_range_.first_byte_position() + 1; | |
177 DCHECK_GE(remaining_bytes_, 0); | |
178 | |
179 if (remaining_bytes_ > 0 && byte_range_.first_byte_position() != 0) { | |
180 int rv = stream_->Seek(base::File::FROM_BEGIN, | |
181 byte_range_.first_byte_position(), | |
182 base::Bind(&URLRequestContentJob::DidSeek, | |
183 weak_ptr_factory_.GetWeakPtr())); | |
184 if (rv != net::ERR_IO_PENDING) { | |
185 // stream_->Seek() failed, so pass an intentionally erroneous value | |
186 // into DidSeek(). | |
187 DidSeek(-1); | |
188 } | |
189 } else { | |
190 // We didn't need to call stream_->Seek() at all, so we pass to DidSeek() | |
191 // the value that would mean seek success. This way we skip the code | |
192 // handling seek failure. | |
193 DidSeek(byte_range_.first_byte_position()); | |
194 } | |
195 } | |
196 | |
197 void URLRequestContentJob::DidSeek(int64 result) { | |
198 if (result != byte_range_.first_byte_position()) { | |
199 NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, | |
200 net::ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | |
201 return; | |
202 } | |
203 | |
204 set_expected_content_size(remaining_bytes_); | |
205 NotifyHeadersComplete(); | |
206 } | |
207 | |
208 void URLRequestContentJob::DidRead( | |
209 scoped_refptr<net::IOBuffer> buf, int result) { | |
210 if (result > 0) { | |
211 SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status | |
212 remaining_bytes_ -= result; | |
213 DCHECK_GE(remaining_bytes_, 0); | |
214 } | |
215 | |
216 DCHECK(io_pending_); | |
217 io_pending_ = false; | |
218 | |
219 DCHECK_LE(result, 0); | |
220 if (result == 0) { | |
221 NotifyDone(net::URLRequestStatus()); | |
222 } else if (result < 0) { | |
223 NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result)); | |
224 } | |
225 | |
226 NotifyReadComplete(result); | |
227 } | |
228 | |
229 } // namespace content | |
OLD | NEW |