relevance 4 | ../test/test_dht.cpp:1250 | pass in the actual salt as a parameter |
pass in the actual salt as a parameter../test/test_dht.cpp:1250
key_desc_t const desc_error[] =
{
{ "e", bdecode_node::list_t, 2, 0 },
{ "y", bdecode_node::string_t, 1, 0},
};
bdecode_node desc_error_keys[2];
// ==== get / put mutable items ===
span<char const> itemv;
signature sig;
char buffer[1200];
sequence_number seq(4);
public_key pk;
secret_key sk;
get_test_keypair(pk, sk);
for (int with_salt = 0; with_salt < 2; ++with_salt)
{
seq = sequence_number(4);
std::printf("\nTEST GET/PUT%s \ngenerating ed25519 keys\n\n"
, with_salt ? " with-salt" : " no-salt");
std::array<char, 32> seed = ed25519_create_seed();
std::tie(pk, sk) = ed25519_create_keypair(seed);
std::printf("pub: %s priv: %s\n"
, aux::to_hex(pk.bytes).c_str()
, aux::to_hex(sk.bytes).c_str());
std::string salt;
if (with_salt) salt = "foobar";
hasher h(pk.bytes);
if (with_salt) h.update(salt);
sha1_hash target_id = h.final();
std::printf("target_id: %s\n"
, aux::to_hex(target_id).c_str());
send_dht_request(t.dht_node, "get", t.source, &response
, msg_args().target(target_id));
key_desc_t const desc[] =
{
{ "r", bdecode_node::dict_t, 0, key_desc_t::parse_children },
{ "id", bdecode_node::string_t, 20, 0},
{ "token", bdecode_node::string_t, 0, 0},
{ "ip", bdecode_node::string_t, 0, key_desc_t::optional | key_desc_t::last_child},
| ||
relevance 4 | ../test/test_dht.cpp:2138 | pass in the actual salt as the argument |
pass in the actual salt as the argument../test/test_dht.cpp:2138 }
}
g_got_peers.clear();
}
} // anonymous namespace
// exercise the get_peers request/response flow over an IPv4 node
TORRENT_TEST(get_peers_v4)
{
test_get_peers(rand_v4);
}
// exercise the get_peers request/response flow over an IPv6 node,
// but only when the host actually supports IPv6
TORRENT_TEST(get_peers_v6)
{
if (supports_ipv6())
test_get_peers(rand_v6);
}
namespace {
void test_mutable_get(address(&rand_addr)(), bool const with_salt)
{
dht_test_setup t(udp::endpoint(rand_addr(), 20));
public_key pk;
secret_key sk;
get_test_keypair(pk, sk);
char buffer[1200];
sequence_number seq(4);
span<char const> itemv;
bdecode_node response;
std::string salt;
if (with_salt) salt = "foobar";
// mutable get
g_sent_packets.clear();
udp::endpoint const initial_node(rand_addr(), 1234);
dht::node_id const initial_node_id = to_hash("1111111111222222222233333333334444444444");
t.dht_node.m_table.add_node(node_entry{initial_node_id, initial_node, 10, true});
g_put_item.assign(items[0].ent, salt, seq, pk, sk);
t.dht_node.put_item(pk, std::string()
, std::bind(&put_mutable_item_cb, _1, _2, 0)
, put_mutable_item_data_cb);
TEST_EQUAL(g_sent_packets.size(), 1);
| ||
relevance 3 | ../test/test_dht.cpp:118 | make the mock_socket hold a reference to the list of where to record packets instead of having a global variable |
make the mock_socket hold a reference to the list of where to record
packets instead of having a global variable../test/test_dht.cpp:118}
// treats both node_ids as 160-bit big-endian integers and adds `add`
// into `dst` in place. Any carry out of the most significant byte is
// discarded (i.e. the sum wraps modulo 2^160)
void add_and_replace(node_id& dst, node_id const& add)
{
	int carry = 0;
	for (int i = 19; i >= 0; --i)
	{
		int const total = dst[i] + add[i] + carry;
		dst[i] = total & 255;
		carry = (total > 255) ? 1 : 0;
	}
}
// callback helper: appends a discovered node to an externally-owned vector
void node_push_back(std::vector<node_entry>* nv, node_entry const& n)
{
nv->push_back(n);
}
// no-op callback, used where a callback is required but its result is ignored
void nop_node() {}
std::list<std::pair<udp::endpoint, entry>> g_sent_packets;
struct mock_socket final : socket_manager
{
bool has_quota() override { return true; }
bool send_packet(aux::listen_socket_handle const&, entry& msg, udp::endpoint const& ep) override
{
| ||
relevance 3 | ../test/test_dht.cpp:127 | ideally the mock_socket would contain this queue of packets, to make tests independent |
ideally the mock_socket would contain this queue of packets, to
make tests independent../test/test_dht.cpp:127 int sum = dst[k] + add[k] + (carry ? 1 : 0);
dst[k] = sum & 255;
carry = sum > 255;
}
}
void node_push_back(std::vector<node_entry>* nv, node_entry const& n)
{
nv->push_back(n);
}
void nop_node() {}
std::list<std::pair<udp::endpoint, entry>> g_sent_packets;
// socket_manager stub used by the DHT tests: instead of sending packets
// over the network, it records each (endpoint, message) pair in the
// global g_sent_packets list so tests can inspect outgoing traffic
struct mock_socket final : socket_manager
{
	// pretend unlimited send quota is always available
	bool has_quota() override { return true; }
	// capture the outgoing packet and report success
	bool send_packet(aux::listen_socket_handle const&, entry& msg, udp::endpoint const& ep) override
	{
		g_sent_packets.emplace_back(ep, msg);
		return true;
	}
};
// builds a listen socket bound to `src`, seeding its external-address
// voting with the same address (attributed to a random DHT source)
std::shared_ptr<aux::listen_socket_t> dummy_listen_socket(udp::endpoint src)
{
	auto sock = std::make_shared<aux::listen_socket_t>();
	sock->local_endpoint = tcp::endpoint(src.address(), src.port());
	sock->external_address.cast_vote(src.address()
		, aux::session_interface::source_dht, rand_v4());
	return sock;
}
// builds a canned IPv4 listen socket (192.168.4.1:6881) whose external
// address has been voted to be 236.0.0.1
std::shared_ptr<aux::listen_socket_t> dummy_listen_socket4()
{
	auto sock = std::make_shared<aux::listen_socket_t>();
	sock->local_endpoint = tcp::endpoint(addr4("192.168.4.1"), 6881);
	sock->external_address.cast_vote(addr4("236.0.0.1")
		, aux::session_interface::source_dht, rand_v4());
	return sock;
}
// builds a canned IPv6 listen socket (2002::1:6881) whose external
// address has been voted to be 2002::1
std::shared_ptr<aux::listen_socket_t> dummy_listen_socket6()
{
	auto sock = std::make_shared<aux::listen_socket_t>();
	sock->local_endpoint = tcp::endpoint(addr6("2002::1"), 6881);
	sock->external_address.cast_vote(addr6("2002::1")
		, aux::session_interface::source_dht, rand_v6());
	return sock;
}
| ||
relevance 3 | ../test/test_dht.cpp:1207 | split this up into smaller tests |
split this up into smaller tests../test/test_dht.cpp:1207 }
// builds a fixed table of 9 test nodes, all claiming the same node ID
// (`target`) but with distinct IPv4 endpoints 1.1.1.1:1231 .. 9.9.9.9:1239,
// each with rtt 10 and marked verified
lt::aux::array<node_entry, 9> build_nodes(sha1_hash target)
{
return lt::aux::array<node_entry, 9>(
std::array<node_entry, 9> {
{ { target, udp::endpoint(addr4("1.1.1.1"), 1231), 10, true}
, { target, udp::endpoint(addr4("2.2.2.2"), 1232), 10, true}
, { target, udp::endpoint(addr4("3.3.3.3"), 1233), 10, true}
, { target, udp::endpoint(addr4("4.4.4.4"), 1234), 10, true}
, { target, udp::endpoint(addr4("5.5.5.5"), 1235), 10, true}
, { target, udp::endpoint(addr4("6.6.6.6"), 1236), 10, true}
, { target, udp::endpoint(addr4("7.7.7.7"), 1237), 10, true}
, { target, udp::endpoint(addr4("8.8.8.8"), 1238), 10, true}
, { target, udp::endpoint(addr4("9.9.9.9"), 1239), 10, true} }
});
}
span<char const> const empty_salt;
void test_put(address(&rand_addr)())
{
dht_test_setup t(udp::endpoint(rand_addr(), 20));
bdecode_node response;
bool ret;
// ====== put ======
init_rand_address();
udp::endpoint eps[1000];
for (int i = 0; i < 1000; ++i)
eps[i] = udp::endpoint(rand_addr(), std::uint16_t(random(16534) + 1));
announce_immutable_items(t.dht_node, eps, items, sizeof(items)/sizeof(items[0]));
key_desc_t const desc2[] =
{
{ "y", bdecode_node::string_t, 1, 0 }
};
bdecode_node desc2_keys[1];
key_desc_t const desc_error[] =
{
{ "e", bdecode_node::list_t, 2, 0 },
{ "y", bdecode_node::string_t, 1, 0},
};
bdecode_node desc_error_keys[2];
| ||
relevance 3 | ../test/test_dht.cpp:2607 | use dht_test_setup class to simplify the node setup |
use dht_test_setup class to simplify the node setup../test/test_dht.cpp:2607 args.nodes({nodes[8]});
send_dht_response(t.dht_node, response, nodes[i].ep(), args);
g_sent_packets.erase(packet);
// once we've sent the response from the farthest node, we're done
if (i == 0) break;
}
TEST_EQUAL(g_put_count, 1);
// k nodes should now have outstanding put requests
TEST_EQUAL(g_sent_packets.size(), 8);
g_sent_packets.clear();
g_put_item.clear();
g_put_count = 0;
}
TORRENT_TEST(dht_dual_stack)
{
auto sett = test_settings();
mock_socket s;
auto sock4 = dummy_listen_socket4();
auto sock6 = dummy_listen_socket6();
obs observer;
counters cnt;
node* node4p = nullptr, *node6p = nullptr;
auto get_foreign_node = [&](node_id const&, std::string const& family)
{
if (family == "n4") return node4p;
if (family == "n6") return node6p;
TEST_CHECK(false);
return static_cast<node*>(nullptr);
};
std::unique_ptr<dht_storage_interface> dht_storage(dht_default_storage_constructor(sett));
dht_storage->update_node_ids({node_id(nullptr)});
dht::node node4(sock4, &s, sett, node_id(nullptr), &observer, cnt, get_foreign_node, *dht_storage);
dht::node node6(sock6, &s, sett, node_id(nullptr), &observer, cnt, get_foreign_node, *dht_storage);
node4p = &node4;
node6p = &node6;
// DHT should be running on port 48199 now
bdecode_node response;
char error_string[200];
bool ret;
node_id id = to_hash("3123456789abcdef01232456789abcdef0123456");
node4.m_table.node_seen(id, udp::endpoint(addr("4.4.4.4"), 4440), 10);
node6.m_table.node_seen(id, udp::endpoint(addr("4::4"), 4441), 10);
// v4 node requesting v6 nodes
| ||
relevance 3 | ../test/test_dht.cpp:3152 | use dht_test_setup class to simplify the node setup |
use dht_test_setup class to simplify the node setup../test/test_dht.cpp:3152
bdecode_node response;
send_dht_request(t.dht_node, "ping", t.source, &response);
dht::key_desc_t const pong_desc[] = {
{ "y", bdecode_node::string_t, 1, 0 },
{ "t", bdecode_node::string_t, 2, 0 },
{ "r", bdecode_node::dict_t, 0, key_desc_t::parse_children },
{ "id", bdecode_node::string_t, 20, key_desc_t::last_child },
};
bdecode_node pong_keys[4];
bool ret = dht::verify_message(response, pong_desc, pong_keys, t.error_string);
TEST_CHECK(ret);
if (!ret) return;
TEST_EQUAL(node_id(pong_keys[3].string_ptr()), t.dht_node.nid());
}
TORRENT_TEST(read_only_node)
{
auto sett = test_settings();
sett.set_bool(settings_pack::dht_read_only, true);
mock_socket s;
auto ls = dummy_listen_socket4();
obs observer;
counters cnt;
std::unique_ptr<dht_storage_interface> dht_storage(dht_default_storage_constructor(sett));
dht_storage->update_node_ids({node_id(nullptr)});
dht::node node(ls, &s, sett, node_id(nullptr), &observer, cnt, get_foreign_node_stub, *dht_storage);
udp::endpoint source(addr("10.0.0.1"), 20);
bdecode_node response;
msg_args args;
// for incoming requests, read_only node won't response.
send_dht_request(node, "ping", source, &response, args, "10", false);
TEST_EQUAL(response.type(), bdecode_node::none_t);
args.target(sha1_hash("01010101010101010101"));
send_dht_request(node, "get", source, &response, args, "10", false);
TEST_EQUAL(response.type(), bdecode_node::none_t);
// also, the sender shouldn't be added to routing table.
TEST_EQUAL(std::get<0>(node.size()), 0);
// for outgoing requests, read_only node will add 'ro' key (value == 1)
// in top-level of request.
bdecode_node parsed[7];
char error_string[200];
udp::endpoint initial_node(addr("4.4.4.4"), 1234);
dht::node_id const initial_node_id = to_hash("1111111111222222222233333333334444444444");
| ||
relevance 3 | ../test/test_dht.cpp:3251 | use dht_test_setup class to simplify the node setup |
use dht_test_setup class to simplify the node setup../test/test_dht.cpp:3251 // both of them shouldn't have a 'ro' key.
node_from_entry(g_sent_packets.front().second, request);
ret = verify_message(request, get_item_desc_ro, parsed, error_string);
TEST_CHECK(ret);
TEST_CHECK(!parsed[3]);
node_from_entry(g_sent_packets.back().second, request);
ret = verify_message(request, get_item_desc_ro, parsed, error_string);
TEST_CHECK(ret);
TEST_CHECK(!parsed[3]);
#endif
}
#ifndef TORRENT_DISABLE_LOGGING
// these tests rely on logging being enabled
TORRENT_TEST(invalid_error_msg)
{
auto sett = test_settings();
mock_socket s;
auto ls = dummy_listen_socket4();
obs observer;
counters cnt;
std::unique_ptr<dht_storage_interface> dht_storage(dht_default_storage_constructor(sett));
dht_storage->update_node_ids({node_id(nullptr)});
dht::node node(ls, &s, sett, node_id(nullptr), &observer, cnt, get_foreign_node_stub, *dht_storage);
udp::endpoint source(addr("10.0.0.1"), 20);
entry e;
e["y"] = "e";
e["e"].string() = "Malformed Error";
char msg_buf[1500];
int size = bencode(msg_buf, e);
bdecode_node decoded;
error_code ec;
bdecode(msg_buf, msg_buf + size, decoded, ec);
if (ec) std::printf("bdecode failed: %s\n", ec.message().c_str());
dht::msg m(decoded, source);
node.incoming(node.m_sock, m);
bool found = false;
for (auto const& log : observer.m_log)
{
if (log.find("INCOMING ERROR") != std::string::npos
&& log.find("(malformed)") != std::string::npos)
found = true;
| ||
relevance 3 | ../test/test_dht.cpp:3344 | use dht_test_setup class to simplify the node setup |
use dht_test_setup class to simplify the node setup../test/test_dht.cpp:3344 TEST_CHECK(algo->num_sorted_results() == 0);
auto results = algo->results();
TEST_CHECK(results.size() == eps.size());
for (std::size_t i = 0; i < eps.size(); ++i)
TEST_CHECK(eps[i] == results[i]->target_ep());
// setting the node ID, regardless of what we set it to, should cause this
// observer to become sorted. i.e. be moved to the beginning of the result
// list.
results[5]->set_id(node_id("abababababababababab"));
TEST_CHECK(algo->num_sorted_results() == 1);
results = algo->results();
TEST_CHECK(results.size() == eps.size());
TEST_CHECK(eps[5] == results[0]->target_ep());
algo->done();
}
TORRENT_TEST(rpc_invalid_error_msg)
{
auto sett = test_settings();
mock_socket s;
auto ls = dummy_listen_socket4();
obs observer;
counters cnt;
dht::routing_table table(node_id(), udp::v4(), 8, sett, &observer);
dht::rpc_manager rpc(node_id(), sett, table, ls, &s, &observer);
std::unique_ptr<dht_storage_interface> dht_storage(dht_default_storage_constructor(sett));
dht_storage->update_node_ids({node_id(nullptr)});
dht::node node(ls, &s, sett, node_id(nullptr), &observer, cnt, get_foreign_node_stub, *dht_storage);
udp::endpoint source(addr("10.0.0.1"), 20);
// we need this to create an entry for this transaction ID, otherwise the
// incoming message will just be dropped
entry req;
req["y"] = "q";
req["q"] = "bogus_query";
req["t"] = "\0\0\0\0";
g_sent_packets.clear();
auto algo = std::make_shared<dht::traversal_algorithm>(node, node_id());
auto o = rpc.allocate_observer<null_observer>(std::move(algo), source, node_id());
#if TORRENT_USE_ASSERTS
o->m_in_constructor = false;
#endif
o->flags |= observer::flag_queried;
rpc.invoke(req, source, o);
| ||
relevance 3 | ../src/torrent.cpp:418 | we could probably get away with just saving a few fields here |
we could probably get away with just saving a few fields here../src/torrent.cpp:418 // --- V2 HASHES ---
if (m_torrent_file->is_valid() && m_torrent_file->info_hashes().has_v2())
{
if (!p.merkle_trees.empty())
load_merkle_trees(
std::move(p.merkle_trees)
, std::move(p.merkle_tree_mask)
, std::move(p.verified_leaf_hashes));
// we really don't want to store extra copies of the trees
TORRENT_ASSERT(p.merkle_trees.empty());
}
if (valid_metadata())
{
inc_stats_counter(counters::num_total_pieces_added
, m_torrent_file->num_pieces());
}
m_add_torrent_params = std::make_unique<add_torrent_params>(std::move(p));
}
// imports per-file merkle trees from resume data into m_merkle_trees.
// `mask` (optional per file) marks which nodes are present in a sparse
// tree; `verified` (optional per file) marks which leaf hashes have been
// verified. Pad files and empty files are skipped since they have no tree
void torrent::load_merkle_trees(
aux::vector<std::vector<sha256_hash>, file_index_t> trees_import
, aux::vector<std::vector<bool>, file_index_t> mask
, aux::vector<std::vector<bool>, file_index_t> verified)
{
auto const& fs = m_torrent_file->orig_files();
// substitute for files that have no verified-leaves bitmask
std::vector<bool> const empty_verified;
for (file_index_t i{0}; i < fs.end_file(); ++i)
{
if (fs.pad_file_at(i) || fs.file_size(i) == 0)
continue;
// the resume data may carry fewer trees than there are files
if (i >= trees_import.end_index()) break;
std::vector<bool> const& verified_bitmask = (i >= verified.end_index()) ? empty_verified : verified[i];
if (i < mask.end_index() && !mask[i].empty())
{
// a non-empty mask means this tree was stored sparsely; make sure
// the mask covers the full tree before loading
mask[i].resize(m_merkle_trees[i].size(), false);
m_merkle_trees[i].load_sparse_tree(trees_import[i], mask[i], verified_bitmask);
}
else
{
m_merkle_trees[i].load_tree(trees_import[i], verified_bitmask);
}
}
}
void torrent::inc_stats_counter(int c, int value)
| ||
relevance 3 | ../src/torrent.cpp:714 | assert there are no outstanding async operations on this torrent |
assert there are no outstanding async operations on this
torrent../src/torrent.cpp:714#endif
if (!m_ses.dht()) return false;
if (m_torrent_file->is_valid() && !m_files_checked) return false;
if (!m_announce_to_dht) return false;
if (m_paused) return false;
// don't announce private torrents
if (m_torrent_file->is_valid() && m_torrent_file->priv()) return false;
if (m_trackers.empty()) return true;
if (!settings().get_bool(settings_pack::use_dht_as_fallback)) return true;
return std::none_of(m_trackers.begin(), m_trackers.end()
, [](aux::announce_entry const& tr) { return bool(tr.verified); });
}
#endif
torrent::~torrent()
{
#if TORRENT_USE_ASSERTS
// unlink this torrent from every session-level torrent list it's in
for (torrent_list_index_t i{}; i != m_links.end_index(); ++i)
{
if (!m_links[i].in_list()) continue;
m_links[i].unlink(m_ses.torrent_list(i), i);
}
#endif
// The invariant can't be maintained here, since the torrent
// is being destructed, all weak references to it have been
// reset, which means that all its peers already have an
// invalidated torrent pointer (so it cannot be verified to be correct)
// i.e. the invariant can only be maintained if all connections have
// been closed by the time the torrent is destructed. And they are
// supposed to be closed. So we can still do the invariant check.
// however, the torrent object may be destructed from the main
// thread when shutting down, if the disk cache has references to it.
// this means that the invariant check that this is called from the
// network thread cannot be maintained
TORRENT_ASSERT(m_peer_class == peer_class_t{0});
TORRENT_ASSERT(m_connections.empty());
// just in case, make sure the session accounting is kept right
for (auto p : m_connections)
m_ses.close_connection(p);
}
void torrent::read_piece(piece_index_t const piece)
| ||
relevance 3 | ../src/torrent.cpp:1326 | there's some duplication between this function and peer_connection::incoming_piece(). is there a way to merge something? |
there's some duplication between this function and
peer_connection::incoming_piece(). is there a way to merge something?../src/torrent.cpp:1326 piece_picker& m_picker;
piece_index_t m_piece;
};
// async-friendly entry point for injecting a complete piece: validates
// the piece index and buffer size, then forwards to add_piece(). Invalid
// input is silently ignored
void torrent::add_piece_async(piece_index_t const piece
	, std::vector<char> data, add_piece_flags_t const flags)
{
	TORRENT_ASSERT(is_single_thread());
	// reject out-of-range piece indices
	if (piece >= torrent_file().end_piece())
		return;
	// the buffer must hold exactly one full piece
	if (std::size_t(m_torrent_file->piece_size(piece)) != data.size())
		return;
	add_piece(piece, data.data(), flags);
}
void torrent::add_piece(piece_index_t const piece, char const* data
, add_piece_flags_t const flags)
{
TORRENT_ASSERT(is_single_thread());
// make sure the piece index is correct
if (piece >= torrent_file().end_piece())
return;
int const piece_size = m_torrent_file->piece_size(piece);
int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
if (m_deleted) return;
// avoid crash trying to access the picker when there is none
if (m_have_all && !has_picker()) return;
// we don't support clobbering the piece picker while checking the
// files. We may end up having the same piece multiple times
TORRENT_ASSERT_PRECOND(state() != torrent_status::checking_files
&& state() != torrent_status::checking_resume_data);
if (state() == torrent_status::checking_files
|| state() == torrent_status::checking_resume_data)
return;
need_picker();
if (picker().have_piece(piece)
&& !(flags & torrent_handle::overwrite_existing))
return;
| ||
relevance 3 | ../src/torrent.cpp:3951 | this could probably be pulled out into a free function |
this could probably be pulled out into a free function../src/torrent.cpp:3951 std::int64_t calc_bytes(file_storage const& fs, piece_count const& pc)
{
// it's an impossible combination to have 0 pieces, but still have one of them be the last piece
TORRENT_ASSERT(!(pc.num_pieces == 0 && pc.last_piece == true));
// if we have 0 pieces, we can't have any pad blocks either
TORRENT_ASSERT(!(pc.num_pieces == 0 && pc.pad_bytes > 0));
// if we have all pieces, we must also have the last one
TORRENT_ASSERT(!(pc.num_pieces == fs.num_pieces() && pc.last_piece == false));
// every block should not be a pad block
TORRENT_ASSERT(pc.pad_bytes <= std::int64_t(pc.num_pieces) * fs.piece_length());
return std::int64_t(pc.num_pieces) * fs.piece_length()
- (pc.last_piece ? fs.piece_length() - fs.piece_size(fs.last_piece()) : 0)
- std::int64_t(pc.pad_bytes);
}
// fills in total_wanted, total_wanted_done and total_done
void torrent::bytes_done(torrent_status& st, status_flags_t const flags) const
{
INVARIANT_CHECK;
st.total_done = 0;
st.total_wanted_done = 0;
st.total_wanted = m_size_on_disk;
TORRENT_ASSERT(st.total_wanted <= m_torrent_file->total_size());
TORRENT_ASSERT(st.total_wanted >= 0);
TORRENT_ASSERT(!valid_metadata() || m_torrent_file->num_pieces() > 0);
if (!valid_metadata()) return;
if (m_seed_mode || is_seed())
{
// once we're a seed and remove the piece picker, we stop tracking
// piece- and file priority. We consider everything as being
// "wanted"
st.total_done = m_torrent_file->total_size() - m_padding_bytes;
st.total_wanted_done = m_size_on_disk;
st.total_wanted = m_size_on_disk;
TORRENT_ASSERT(st.total_wanted <= st.total_done);
TORRENT_ASSERT(st.total_wanted_done <= st.total_wanted);
TORRENT_ASSERT(st.total_done <= m_torrent_file->total_size());
return;
}
else if (!has_picker())
{
st.total_done = 0;
st.total_wanted_done = 0;
| ||
relevance 3 | ../src/torrent.cpp:4886 | should this alert have an error code in it? |
should this alert have an error code in it?../src/torrent.cpp:4886 on_remove_peers();
TORRENT_ASSERT(m_connections.empty());
// post a message to the main thread to destruct
// the torrent object from there
if (m_storage)
{
try {
m_ses.disk_thread().async_stop_torrent(m_storage
, std::bind(&torrent::on_torrent_aborted, shared_from_this()));
}
catch (std::exception const& e)
{
TORRENT_UNUSED(e);
m_storage.reset();
#ifndef TORRENT_DISABLE_LOGGING
debug_log("Failed to flush disk cache: %s", e.what());
#endif
// clients may rely on this alert to be posted, so it's probably a
// good idea to post it here, even though we failed
if (alerts().should_post<cache_flushed_alert>())
alerts().emplace_alert<cache_flushed_alert>(get_handle());
}
m_ses.deferred_submit_jobs();
}
else
{
if (alerts().should_post<cache_flushed_alert>())
alerts().emplace_alert<cache_flushed_alert>(get_handle());
alerts().emplace_alert<torrent_removed_alert>(get_handle()
, info_hash(), get_userdata());
}
| ||
relevance 3 | ../src/torrent.cpp:4956 | this should return optional<>. piece index -1 should not be allowed |
this should return optional<>. piece index -1 should not be
allowed../src/torrent.cpp:4956 }
#ifndef TORRENT_DISABLE_SUPERSEEDING
// enables or disables super-seeding mode. Disabling it also clears the
// super-seeded piece on every connected peer
void torrent::set_super_seeding(bool const on)
{
// no-op if the mode isn't actually changing
if (on == m_super_seeding) return;
m_super_seeding = on;
set_need_save_resume(torrent_handle::if_state_changed);
state_updated();
// when turning super-seeding on, there is nothing more to do here
if (m_super_seeding) return;
// disable super seeding for all peers
for (auto pc : *this)
{
pc->superseed_piece(piece_index_t(-1), piece_index_t(-1));
}
}
piece_index_t torrent::get_piece_to_super_seed(typed_bitfield<piece_index_t> const& bits)
{
// return a piece with low availability that is not in
// the bitfield and that is not currently being super
// seeded by any peer
TORRENT_ASSERT(m_super_seeding);
// do a linear search from the first piece
int min_availability = 9999;
std::vector<piece_index_t> avail_vec;
for (auto const i : m_torrent_file->piece_range())
{
if (bits[i]) continue;
int availability = 0;
for (auto pc : *this)
{
if (pc->super_seeded_piece(i))
{
// avoid super-seeding the same piece to more than one
// peer if we can avoid it. Do this by artificially
// increase the availability
availability = 999;
break;
}
if (pc->has_piece(i)) ++availability;
}
if (availability > min_availability) continue;
if (availability == min_availability)
{
avail_vec.push_back(i);
| ||
relevance 3 | ../src/web_peer_connection.cpp:199 | this should be an optional, piece index -1 should not be allowed |
this should be an optional, piece index -1 should
not be allowed../src/web_peer_connection.cpp:199
auto const range = aux::file_piece_range_inclusive(fs, i);
for (piece_index_t k = std::get<0>(range); k < std::get<1>(range); ++k)
have.clear_bit(k);
}
t->set_seed(peer_info_struct(), false);
if (have.none_set())
{
incoming_have_none();
m_web->interesting = false;
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "WEB-SEED", "have no pieces, not interesting. %s", m_url.c_str());
#endif
}
else
{
incoming_bitfield(have);
}
}
if (m_web->restart_request.piece != piece_index_t(-1))
{
// increase the chances of requesting the block
// we have partial data for already, to finish it
incoming_suggest(m_web->restart_request.piece);
}
web_connection_base::on_connected();
}
void web_peer_connection::disconnect(error_code const& ec
, operation_t op, disconnect_severity_t const error)
{
if (is_disconnecting()) return;
if (op == operation_t::sock_write && ec == boost::system::errc::broken_pipe)
{
#ifndef TORRENT_DISABLE_LOGGING
// a write operation failed with broken-pipe. This typically happens
// with HTTP 1.0 servers that close their incoming channel of the TCP
// stream whenever they're done reading one full request. Instead of
// us bailing out and failing the entire request just because our
// write-end was closed, ignore it and keep reading until the read-end
// also is closed.
peer_log(peer_log_alert::info, "WRITE_DIRECTION", "CLOSED");
#endif
// prevent the peer from trying to send anything more
m_send_buffer.clear();
// when the web server closed our write-end of the socket (i.e. its
// read-end), if it's an HTTP 1.0 server. we will stop sending more
| ||
relevance 3 | ../src/web_peer_connection.cpp:419 | do we really need a special case here? wouldn't the multi-file case handle single file torrents correctly too? |
do we really need a special case here? wouldn't the multi-file
case handle single file torrents correctly too?../src/web_peer_connection.cpp:419 size -= pr.length;
}
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::outgoing_message, "REQUESTING", "(piece: %d start: %d) - (piece: %d end: %d)"
, static_cast<int>(r.piece), r.start
, static_cast<int>(pr.piece), pr.start + pr.length);
#endif
bool const single_file_request = t->torrent_file().num_files() == 1;
int const proxy_type = m_settings.get_int(settings_pack::proxy_type);
bool const using_proxy = (proxy_type == settings_pack::http
|| proxy_type == settings_pack::http_pw) && !m_ssl;
// the number of pad files that have been "requested". In case we _only_
// request padfiles, we can't rely on handling them in the on_receive()
// callback (because we won't receive anything), instead we have to post a
// pretend read callback where we can deliver the zeroes for the partfile
int num_pad_files = 0;
if (single_file_request)
{
file_request_t file_req;
file_req.file_index = file_index_t(0);
file_req.start = std::int64_t(static_cast<int>(req.piece)) * info.piece_length()
+ req.start;
file_req.length = req.length;
request += "GET ";
// do not encode single file paths, they are
// assumed to be encoded in the torrent file
request += using_proxy ? m_url : m_path;
request += " HTTP/1.1\r\n";
add_headers(request, m_settings, using_proxy);
request += "\r\nRange: bytes=";
request += to_string(file_req.start).data();
request += "-";
request += to_string(file_req.start + file_req.length - 1).data();
request += "\r\n\r\n";
m_first_request = false;
m_file_requests.push_back(file_req);
}
else
{
std::vector<file_slice> files = info.orig_files().map_block(req.piece, req.start
, req.length);
for (auto const &f : files)
{
file_request_t file_req;
| ||
relevance 3 | ../src/web_peer_connection.cpp:504 | file_index_t should not allow negative values |
file_index_t should not allow negative values../src/web_peer_connection.cpp:504 // with the correct slashes. Don't encode it again
request += m_path;
}
request += escape_file_path(info.orig_files(), f.file_index);
}
request += " HTTP/1.1\r\n";
add_headers(request, m_settings, using_proxy);
request += "\r\nRange: bytes=";
request += to_string(f.offset).data();
request += "-";
request += to_string(f.offset + f.size - 1).data();
request += "\r\n\r\n";
m_first_request = false;
#if 0
std::cerr << this << " SEND-REQUEST: f: " << f.file_index
<< " s: " << f.offset
<< " e: " << (f.offset + f.size - 1) << std::endl;
#endif
TORRENT_ASSERT(f.file_index >= file_index_t(0));
m_file_requests.push_back(file_req);
}
}
if (num_pad_files == int(m_file_requests.size()))
{
post(get_context(), std::bind(
&web_peer_connection::on_receive_padfile,
std::static_pointer_cast<web_peer_connection>(self())));
return;
}
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::outgoing_message, "REQUEST", "%s", request.c_str());
#endif
send_buffer(request);
}
namespace {
// picks a display name for a web seed peer: the HTTP "Server" header
// when present, otherwise the host name
std::string get_peer_name(http_parser const& p, std::string const& host)
{
	std::string const& server_version = p.header("server");
	return server_version.empty() ? host : server_version;
}
| ||
relevance 3 | ../src/web_peer_connection.cpp:695 | this could be made more efficient for the case when we use an HTTP proxy. Then we wouldn't need to add new web seeds to the torrent, we could just make the redirect table contain full URLs. |
this could be made more efficient for the case when we use an
HTTP proxy. Then we wouldn't need to add new web seeds to the torrent,
we could just make the redirect table contain full URLs.../src/web_peer_connection.cpp:695 bool const single_file_request = !m_path.empty()
&& m_path[m_path.size() - 1] != '/';
// when SSRF mitigation is enabled, a web seed on the internet (is_global())
// is not allowed to redirect to a server on the local network, so we set
// the no_local_ips flag
auto const web_seed_flags = torrent::ephemeral
| ((m_settings.get_bool(settings_pack::ssrf_mitigation) && aux::is_global(remote().address()))
? torrent::no_local_ips : web_seed_flag_t{});
// add the redirected url and remove the current one
if (!single_file_request)
{
TORRENT_ASSERT(!m_file_requests.empty());
file_index_t const file_index = m_file_requests.front().file_index;
location = resolve_redirect_location(m_url, location);
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "LOCATION", "%s", location.c_str());
#endif
std::string redirect_base;
std::string redirect_path;
error_code ec;
std::tie(redirect_base, redirect_path) = split_url(location, ec);
if (ec)
{
// we should not try this server again.
disconnect(errors::missing_location, operation_t::bittorrent, failure);
return;
}
// add_web_seed won't add duplicates. If we have already added an entry
// with this URL, we'll get back the existing entry
// "ephemeral" flag should be set to avoid "web_seed_t" saving in resume data.
// E.g. original "web_seed_t" request url points to "http://example1.com/file1" and
// web server responses with redirect location "http://example2.com/subpath/file2".
// "handle_redirect" process this location to create new "web_seed_t"
// with base url=="http://example2.com/" and redirects[0]=="/subpath/file2").
// If we try to load resume with such "web_seed_t" then "web_peer_connection" will send
// request with wrong path "http://example2.com/file1" (cause "redirects" map is not serialized in resume)
web_seed_t* web = t->add_web_seed(redirect_base, web_seed_entry::url_seed
, m_external_auth, m_extra_headers, web_seed_flags);
web->have_files.resize(t->torrent_file().num_files(), false);
// the new web seed we're adding only has this file for now
// we may add more files later
web->redirects[file_index] = redirect_path;
if (web->have_files.get_bit(file_index) == false)
{
| ||
relevance 3 | ../src/peer_connection.cpp:3119 | instead of having to ask the torrent whether it's in graceful pause mode or not, the peers should keep that state (and the torrent should update them when it enters graceful pause). When a peer enters graceful pause mode, it should cancel all outstanding requests and clear its request queue. |
instead of having to ask the torrent whether it's in graceful
pause mode or not, the peers should keep that state (and the torrent
should update them when it enters graceful pause). When a peer enters
graceful pause mode, it should cancel all outstanding requests and
clear its request queue.../src/peer_connection.cpp:3119 // to disk or are in the disk write cache
if (picker.is_piece_finished(p.piece) && !was_finished)
{
#if TORRENT_USE_INVARIANT_CHECKS
check_postcondition post_checker2_(t, false);
#endif
t->verify_piece(p.piece);
}
check_graceful_pause();
if (is_disconnecting()) return;
if (request_a_block(*t, *this))
m_counters.inc_stats_counter(counters::incoming_piece_picks);
send_block_requests();
}
void peer_connection::check_graceful_pause()
{
std::shared_ptr<torrent> t = m_torrent.lock();
if (!t || !t->graceful_pause()) return;
if (m_outstanding_bytes > 0) return;
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "GRACEFUL_PAUSE", "NO MORE DOWNLOAD");
#endif
disconnect(errors::torrent_paused, operation_t::bittorrent);
}
void peer_connection::on_disk_write_complete(storage_error const& error
, peer_request const& p, std::shared_ptr<torrent> t)
{
TORRENT_ASSERT(is_single_thread());
#ifndef TORRENT_DISABLE_LOGGING
if (should_log(peer_log_alert::info))
{
peer_log(peer_log_alert::info, "FILE_ASYNC_WRITE_COMPLETE", "piece: %d s: %x l: %x e: %s"
, static_cast<int>(p.piece), p.start, p.length, error.ec.message().c_str());
}
#endif
m_counters.inc_stats_counter(counters::queued_write_bytes, -p.length);
m_outstanding_writing_bytes -= p.length;
TORRENT_ASSERT(m_outstanding_writing_bytes >= 0);
// every peer is entitled to allocate a disk buffer if it has no writes outstanding
// see the comment in incoming_piece
if (m_outstanding_writing_bytes == 0
| ||
relevance 3 | ../src/peer_connection.cpp:4027 | once peers are properly put in graceful pause mode, they can cancel all outstanding requests and this test can be removed. |
once peers are properly put in graceful pause mode, they can
cancel all outstanding requests and this test can be removed.../src/peer_connection.cpp:4027
if (!p->m_deferred_send_block_requests)
return;
p->m_deferred_send_block_requests = false;
p->send_block_requests_impl();
});
m_deferred_send_block_requests = true;
}
void peer_connection::send_block_requests_impl()
{
TORRENT_ASSERT(is_single_thread());
INVARIANT_CHECK;
std::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
if (m_disconnecting) return;
if (t->graceful_pause()) return;
// we can't download pieces in these states
if (t->state() == torrent_status::checking_files
|| t->state() == torrent_status::checking_resume_data
|| t->state() == torrent_status::downloading_metadata)
return;
if (int(m_download_queue.size()) >= m_desired_queue_size
|| t->upload_mode()) return;
bool const empty_download_queue = m_download_queue.empty();
while (!m_request_queue.empty()
&& (int(m_download_queue.size()) < m_desired_queue_size
|| m_queued_time_critical > 0))
{
pending_block block = m_request_queue.front();
m_request_queue.erase(m_request_queue.begin());
if (m_queued_time_critical) --m_queued_time_critical;
// if we're a seed, we don't have a piece picker
// so we don't have to worry about invariants getting
// out of sync with it
if (!t->has_picker()) continue;
// this can happen if a block times out, is re-requested and
// then arrives "unexpectedly"
if (t->picker().is_downloaded(block.block))
{
| ||
relevance 3 | ../src/peer_connection.cpp:4713 | new_piece should be an optional. piece index -1 should not be allowed |
new_piece should be an optional. piece index -1
should not be allowed../src/peer_connection.cpp:4713
// pieces may be empty if we don't have metadata yet
if (p.pieces.empty())
{
p.progress = 0.f;
p.progress_ppm = 0;
}
else
{
#if TORRENT_NO_FPU
p.progress = 0.f;
#else
p.progress = float(p.pieces.count()) / float(p.pieces.size());
#endif
p.progress_ppm = int(std::int64_t(p.pieces.count()) * 1000000 / p.pieces.size());
}
}
#ifndef TORRENT_DISABLE_SUPERSEEDING
void peer_connection::superseed_piece(piece_index_t const replace_piece
, piece_index_t const new_piece)
{
TORRENT_ASSERT(is_single_thread());
if (is_connecting()) return;
if (in_handshake()) return;
if (new_piece == piece_index_t(-1))
{
if (m_superseed_piece[0] == piece_index_t(-1)) return;
m_superseed_piece[0] = piece_index_t(-1);
m_superseed_piece[1] = piece_index_t(-1);
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "SUPER_SEEDING", "ending");
#endif
std::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
// this will either send a full bitfield or
// a have-all message, effectively terminating
// super-seeding, since the peer may pick any piece
write_bitfield();
return;
}
TORRENT_ASSERT(!has_piece(new_piece));
#ifndef TORRENT_DISABLE_LOGGING
| ||
relevance 3 | ../src/ut_metadata.cpp:268 | use the aux::write_* functions and the span here instead, it will fit better with send_buffer() |
use the aux::write_* functions and the span here instead, it
will fit better with send_buffer()../src/ut_metadata.cpp:268 int metadata_piece_size = 0;
if (m_torrent.valid_metadata())
e["total_size"] = m_tp.metadata().size();
if (type == msg_t::piece)
{
TORRENT_ASSERT(piece >= 0 && piece < (m_tp.metadata().size() + 16 * 1024 - 1) / (16 * 1024));
TORRENT_ASSERT(m_pc.associated_torrent().lock()->valid_metadata());
TORRENT_ASSERT(m_torrent.valid_metadata());
int const offset = piece * 16 * 1024;
metadata = m_tp.metadata().data() + offset;
metadata_piece_size = std::min(
int(m_tp.metadata().size()) - offset, 16 * 1024);
TORRENT_ASSERT(metadata_piece_size > 0);
TORRENT_ASSERT(offset >= 0);
TORRENT_ASSERT(offset + metadata_piece_size <= m_tp.metadata().size());
}
char msg[200];
char* header = msg;
char* p = &msg[6];
int const len = bencode(p, e);
int const total_size = 2 + len + metadata_piece_size;
namespace io = aux;
io::write_uint32(total_size, header);
io::write_uint8(bt_peer_connection::msg_extended, header);
io::write_uint8(m_message_index, header);
m_pc.send_buffer({msg, len + 6});
| ||
relevance 3 | ../src/session_handle.cpp:672 | expose the sequence_number, public_key, secret_key and signature types to the client |
expose the sequence_number, public_key, secret_key and signature
types to the client../src/session_handle.cpp:672 void session_handle::dht_get_item(sha1_hash const& target)
{
#ifndef TORRENT_DISABLE_DHT
async_call(&session_impl::dht_get_immutable_item, target);
#else
TORRENT_UNUSED(target);
#endif
}
void session_handle::dht_get_item(std::array<char, 32> key
, std::string salt)
{
#ifndef TORRENT_DISABLE_DHT
async_call(&session_impl::dht_get_mutable_item, key, salt);
#else
TORRENT_UNUSED(key);
TORRENT_UNUSED(salt);
#endif
}
sha1_hash session_handle::dht_put_item(entry data)
{
std::vector<char> buf;
bencode(std::back_inserter(buf), data);
sha1_hash const ret = hasher(buf).final();
#ifndef TORRENT_DISABLE_DHT
async_call(&session_impl::dht_put_immutable_item, data, ret);
#endif
return ret;
}
void session_handle::dht_put_item(std::array<char, 32> key
, std::function<void(entry&, std::array<char,64>&
, std::int64_t&, std::string const&)> cb
, std::string salt)
{
#ifndef TORRENT_DISABLE_DHT
async_call(&session_impl::dht_put_mutable_item, key, cb, salt);
#else
TORRENT_UNUSED(key);
TORRENT_UNUSED(cb);
TORRENT_UNUSED(salt);
#endif
}
void session_handle::dht_get_peers(sha1_hash const& info_hash)
{
#ifndef TORRENT_DISABLE_DHT
async_call(&session_impl::dht_get_peers, info_hash);
#else
| ||
relevance 3 | ../src/session_impl.cpp:1142 | closing the udp sockets here means that the uTP connections cannot be closed gracefully |
closing the udp sockets here means that
the uTP connections cannot be closed gracefully../src/session_impl.cpp:1142#ifndef TORRENT_DISABLE_LOGGING
session_log(" aborting all connections (%d)", int(m_connections.size()));
#endif
// abort all connections
for (auto i = m_connections.begin(); i != m_connections.end();)
{
peer_connection* p = (*i).get();
++i;
p->disconnect(errors::stopping_torrent, operation_t::bittorrent);
}
// close the listen sockets
for (auto const& l : m_listen_sockets)
{
if (l->sock)
{
l->sock->close(ec);
TORRENT_ASSERT(!ec);
}
if (l->udp_sock)
{
l->udp_sock->sock.close();
}
}
// we need to give all the sockets an opportunity to actually have their handlers
// called and cancelled before we continue the shutdown. This is a bit
// complicated, if there are no "undead" peers, it's safe to resume the
// shutdown, but if there are, we have to wait for them to be cleared out
// first. In session_impl::on_tick() we check them periodically. If we're
// shutting down and we remove the last one, we'll initiate
// shutdown_stage2 from there.
if (m_undead_peers.empty())
{
post(m_io_context, make_handler([this] { abort_stage2(); }
, m_abort_handler_storage, *this));
}
}
void session_impl::abort_stage2() noexcept
{
m_download_rate.close();
m_upload_rate.close();
// it's OK to detach the threads here. The disk_io_thread
// has an internal counter and won't release the network
// thread until they're all dead (via m_work).
m_disk_thread->abort(false);
// now it's OK for the network thread to exit
| ||
relevance 3 | ../src/session_impl.cpp:1636 | the logic in this if-block should be factored out into a separate function. At least most of it |
the logic in this if-block should be factored out into a
separate function. At least most of it../src/session_impl.cpp:1636 , (lep.flags & listen_socket_t::local_network) ? "local-network " : ""
, (lep.flags & listen_socket_t::accept_incoming) ? "accept-incoming " : "no-incoming "
, (lep.flags & listen_socket_t::was_expanded) ? "expanded-ip " : ""
, (lep.flags & listen_socket_t::proxy) ? "proxy " : "");
}
#endif
auto ret = std::make_shared<listen_socket_t>();
ret->ssl = lep.ssl;
ret->original_port = bind_ep.port();
ret->flags = lep.flags;
ret->netmask = lep.netmask;
operation_t last_op = operation_t::unknown;
socket_type_t const sock_type
= (lep.ssl == transport::ssl)
? socket_type_t::tcp_ssl
: socket_type_t::tcp;
// if we're in force-proxy mode, don't open TCP listen sockets. We cannot
// accept connections on our local machine in this case.
if (ret->flags & listen_socket_t::accept_incoming)
{
ret->sock = std::make_shared<tcp::acceptor>(m_io_context);
ret->sock->open(bind_ep.protocol(), ec);
last_op = operation_t::sock_open;
if (ec)
{
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
session_log("failed to open socket: %s"
, ec.message().c_str());
}
#endif
if (m_alerts.should_post<listen_failed_alert>())
m_alerts.emplace_alert<listen_failed_alert>(lep.device, bind_ep, last_op
, ec, sock_type);
return ret;
}
#ifdef TORRENT_WINDOWS
{
// this is best-effort. ignore errors
error_code err;
ret->sock->set_option(exclusive_address_use(true), err);
#ifndef TORRENT_DISABLE_LOGGING
if (err && should_log())
{
session_log("failed enable exclusive address use on listen socket: %s"
, err.message().c_str());
| ||
relevance 3 | ../src/session_impl.cpp:2604 | it would be neat if the utp socket manager would handle ICMP errors too |
it would be neat if the utp socket manager would
handle ICMP errors too../src/session_impl.cpp:2604
std::shared_ptr<session_udp_socket> s = socket.lock();
if (!s) return;
struct utp_socket_manager& mgr =
#ifdef TORRENT_SSL_PEERS
ssl == transport::ssl ? m_ssl_utp_socket_manager :
#endif
m_utp_socket_manager;
for (;;)
{
aux::array<udp_socket::packet, 50> p;
error_code err;
int const num_packets = s->sock.read(p, err);
for (udp_socket::packet& packet : span<udp_socket::packet>(p).first(num_packets))
{
if (packet.error)
{
#ifndef TORRENT_DISABLE_DHT
if (m_dht)
m_dht->incoming_error(packet.error, packet.from);
#endif
m_tracker_manager.incoming_error(packet.error, packet.from);
continue;
}
span<char const> const buf = packet.data;
if (!packet.hostname.empty())
{
// only the tracker manager supports receiving UDP packets
// from hostnames. If it won't handle it, no one else will
// either
m_tracker_manager.incoming_packet(packet.hostname, buf);
continue;
}
// give the uTP socket manager first dibs on the packet. Presumably
// the majority of packets are uTP packets.
if (!mgr.incoming_packet(ls, packet.from, buf))
{
// if it wasn't a uTP packet, try the other users of the UDP
// socket
bool handled = false;
#ifndef TORRENT_DISABLE_DHT
auto listen_socket = ls.lock();
if (m_dht && buf.size() > 20
&& buf.front() == 'd'
| ||
relevance 3 | ../src/session_impl.cpp:4153 | it would probably make sense to have a separate list of peers that are eligible for optimistic unchoke, similar to the torrents perhaps this could even iterate over the pool allocators of torrent_peer objects. It could probably be done in a single pass and collect the n best candidates. maybe just a queue of peers would make even more sense, just pick the next peer in the queue for unchoking. It would be O(1). |
it would probably make sense to have a separate list of peers
that are eligible for optimistic unchoke, similar to the torrents
perhaps this could even iterate over the pool allocators of
torrent_peer objects. It could probably be done in a single pass and
collect the n best candidates. maybe just a queue of peers would make
even more sense, just pick the next peer in the queue for unchoking. It
would be O(1).../src/session_impl.cpp:4153 }
void session_impl::recalculate_optimistic_unchoke_slots()
{
INVARIANT_CHECK;
TORRENT_ASSERT(is_single_thread());
if (m_stats_counters[counters::num_unchoke_slots] == 0) return;
// if we unchoke everyone, skip this logic
if (settings().get_int(settings_pack::choking_algorithm) == settings_pack::fixed_slots_choker
&& settings().get_int(settings_pack::unchoke_slots_limit) < 0)
return;
std::vector<opt_unchoke_candidate> opt_unchoke;
// collect the currently optimistically unchoked peers here, so we can
// choke them when we've found new optimistic unchoke candidates.
std::vector<torrent_peer*> prev_opt_unchoke;
for (auto& i : m_connections)
{
peer_connection* const p = i.get();
TORRENT_ASSERT(p);
torrent_peer* pi = p->peer_info_struct();
if (!pi) continue;
if (pi->web_seed) continue;
if (pi->optimistically_unchoked)
{
prev_opt_unchoke.push_back(pi);
}
torrent const* t = p->associated_torrent().lock().get();
if (!t) continue;
| ||
relevance 3 | ../src/session_impl.cpp:4176 | peers should know whether their torrent is paused or not, instead of having to ask it over and over again |
peers should know whether their torrent is paused or not,
instead of having to ask it over and over again../src/session_impl.cpp:4176 // collect the currently optimistically unchoked peers here, so we can
// choke them when we've found new optimistic unchoke candidates.
std::vector<torrent_peer*> prev_opt_unchoke;
for (auto& i : m_connections)
{
peer_connection* const p = i.get();
TORRENT_ASSERT(p);
torrent_peer* pi = p->peer_info_struct();
if (!pi) continue;
if (pi->web_seed) continue;
if (pi->optimistically_unchoked)
{
prev_opt_unchoke.push_back(pi);
}
torrent const* t = p->associated_torrent().lock().get();
if (!t) continue;
if (t->is_paused()) continue;
if (!p->is_connecting()
&& !p->is_disconnecting()
&& p->is_peer_interested()
&& t->free_upload_slots()
&& (p->is_choked() || pi->optimistically_unchoked)
&& !p->ignore_unchoke_slots()
&& t->valid_metadata())
{
opt_unchoke.emplace_back(&i);
}
}
// find the peers that has been waiting the longest to be optimistically
// unchoked
int num_opt_unchoke = m_settings.get_int(settings_pack::num_optimistic_unchoke_slots);
int const allowed_unchoke_slots = int(m_stats_counters[counters::num_unchoke_slots]);
if (num_opt_unchoke == 0) num_opt_unchoke = std::max(1, allowed_unchoke_slots / 5);
if (num_opt_unchoke > int(opt_unchoke.size())) num_opt_unchoke =
int(opt_unchoke.size());
// find the n best optimistic unchoke candidates
std::partial_sort(opt_unchoke.begin()
, opt_unchoke.begin() + num_opt_unchoke
, opt_unchoke.end()
#ifndef TORRENT_DISABLE_EXTENSIONS
, last_optimistic_unchoke_cmp(m_ses_extensions[plugins_optimistic_unchoke_idx])
#else
, last_optimistic_unchoke_cmp()
| ||
relevance 3 | ../src/session_impl.cpp:4422 | there should be a pre-calculated list of all peers eligible for unchoking |
there should be a pre-calculated list of all peers eligible for
unchoking../src/session_impl.cpp:4422 }
void session_impl::recalculate_unchoke_slots()
{
TORRENT_ASSERT(is_single_thread());
time_point const now = aux::time_now();
time_duration const unchoke_interval = now - m_last_choke;
m_last_choke = now;
// if we unchoke everyone, skip this logic
if (settings().get_int(settings_pack::choking_algorithm) == settings_pack::fixed_slots_choker
&& settings().get_int(settings_pack::unchoke_slots_limit) < 0)
{
m_stats_counters.set_value(counters::num_unchoke_slots, std::numeric_limits<int>::max());
return;
}
// build list of all peers that are
// unchokable.
std::vector<peer_connection*> peers;
for (auto i = m_connections.begin(); i != m_connections.end();)
{
std::shared_ptr<peer_connection> p = *i;
TORRENT_ASSERT(p);
++i;
torrent* const t = p->associated_torrent().lock().get();
torrent_peer* const pi = p->peer_info_struct();
if (p->ignore_unchoke_slots() || t == nullptr || pi == nullptr
|| pi->web_seed || t->is_paused())
{
p->reset_choke_counters();
continue;
}
if (!p->is_peer_interested()
|| p->is_disconnecting()
|| p->is_connecting())
{
// this peer is not unchokable. So, if it's unchoked
// already, make sure to choke it.
if (p->is_choked())
{
p->reset_choke_counters();
continue;
}
if (pi && pi->optimistically_unchoked)
{
m_stats_counters.inc_stats_counter(counters::num_peers_up_unchoked_optimistic, -1);
pi->optimistically_unchoked = false;
| ||
relevance 3 | ../src/session_impl.cpp:6134 | use public_key here instead of std::array |
use public_key here instead of std::array../src/session_impl.cpp:6134
void session_impl::dht_get_immutable_item(sha1_hash const& target)
{
if (!m_dht) return;
m_dht->get_item(target, std::bind(&session_impl::get_immutable_callback
, this, target, _1));
}
// callback for dht_mutable_get
void session_impl::get_mutable_callback(dht::item const& i
, bool const authoritative)
{
TORRENT_ASSERT(i.is_mutable());
m_alerts.emplace_alert<dht_mutable_item_alert>(i.pk().bytes
, i.sig().bytes, i.seq().value
, i.salt(), i.value(), authoritative);
}
// key is a 32-byte binary string, the public key to look up.
// the salt is optional
void session_impl::dht_get_mutable_item(std::array<char, 32> key
, std::string salt)
{
if (!m_dht) return;
m_dht->get_item(dht::public_key(key.data()), std::bind(&session_impl::get_mutable_callback
, this, _1, _2), std::move(salt));
}
namespace {
void on_dht_put_immutable_item(aux::alert_manager& alerts, sha1_hash target, int num)
{
if (alerts.should_post<dht_put_alert>())
alerts.emplace_alert<dht_put_alert>(target, num);
}
void on_dht_put_mutable_item(aux::alert_manager& alerts, dht::item const& i, int num)
{
if (alerts.should_post<dht_put_alert>())
{
dht::signature const sig = i.sig();
dht::public_key const pk = i.pk();
dht::sequence_number const seq = i.seq();
std::string salt = i.salt();
alerts.emplace_alert<dht_put_alert>(pk.bytes, sig.bytes
, std::move(salt), seq.value, num);
}
}
void put_mutable_callback(dht::item& i
, std::function<void(entry&, std::array<char, 64>&
| ||
relevance 3 | ../src/kademlia/rpc_manager.cpp:71 | move this into its own .cpp file |
move this into its own .cpp file../src/kademlia/rpc_manager.cpp:71#include <libtorrent/kademlia/get_item.hpp>
#include <libtorrent/kademlia/sample_infohashes.hpp>
#include <libtorrent/aux_/session_settings.hpp>
#include <libtorrent/socket_io.hpp> // for print_endpoint
#include <libtorrent/aux_/time.hpp> // for aux::time_now
#include <libtorrent/aux_/aligned_union.hpp>
#include <libtorrent/aux_/ip_helpers.hpp> // for is_v6
#include <type_traits>
#include <functional>
#ifndef TORRENT_DISABLE_LOGGING
#include <cinttypes> // for PRId64 et.al.
#endif
using namespace std::placeholders;
namespace libtorrent { namespace dht {
constexpr observer_flags_t observer::flag_queried;
constexpr observer_flags_t observer::flag_initial;
constexpr observer_flags_t observer::flag_no_id;
constexpr observer_flags_t observer::flag_short_timeout;
constexpr observer_flags_t observer::flag_failed;
constexpr observer_flags_t observer::flag_ipv6_address;
constexpr observer_flags_t observer::flag_alive;
constexpr observer_flags_t observer::flag_done;
dht_observer* observer::get_observer() const
{
return m_algorithm->get_node().observer();
}
void observer::set_target(udp::endpoint const& ep)
{
m_sent = clock_type::now();
m_port = ep.port();
if (aux::is_v6(ep))
{
flags |= flag_ipv6_address;
m_addr.v6 = ep.address().to_v6().to_bytes();
}
else
{
flags &= ~flag_ipv6_address;
m_addr.v4 = ep.address().to_v4().to_bytes();
}
}
| ||
relevance 3 | ../include/libtorrent/torrent.hpp:1438 | factor out predictive pieces and all operations on it into a separate class (to use as member here instead) |
factor out predictive pieces and all operations on it into a
separate class (to use as member here instead)../include/libtorrent/torrent.hpp:1438#endif
std::string m_trackerid;
#if TORRENT_ABI_VERSION == 1
// deprecated in 1.1
std::string m_username;
std::string m_password;
#endif
std::string m_save_path;
#ifndef TORRENT_DISABLE_PREDICTIVE_PIECES
// this is a list of all pieces that we have announced
// as having, without actually having yet. If we receive
// a request for a piece in this list, we need to hold off
// on responding until we have completed the piece and
// verified its hash. If the hash fails, send reject to
// peers with outstanding requests, and dont_have to other
// peers. This vector is ordered, to make lookups fast.
std::vector<piece_index_t> m_predictive_pieces;
#endif
// v2 merkle tree for each file
aux::vector<aux::merkle_tree, file_index_t> m_merkle_trees;
// the performance counters of this session
counters& m_stats_counters;
// each bit represents a piece. a set bit means
// the piece has had its hash verified. This
// is only used in seed mode (when m_seed_mode
// is true)
typed_bitfield<piece_index_t> m_verified;
// this means there is an outstanding, async, operation
// to verify each piece that has a 1
typed_bitfield<piece_index_t> m_verifying;
// set if there's an error on this torrent
error_code m_error;
// used if there is any resume data. Some of the information from the
// add_torrent_params struct are needed later in the torrent object's life
// cycle, and not in the constructor. So we need to save if away here
std::unique_ptr<add_torrent_params> m_add_torrent_params;
// if the torrent is started without metadata, it may
// still be given a name until the metadata is received
// once the metadata is received this field will no
// longer be used and will be reset
| ||
relevance 3 | ../include/libtorrent/torrent.hpp:1498 | factor out the links (as well as update_list() to a separate class that torrent can inherit) |
factor out the links (as well as update_list() to a separate
class that torrent can inherit)../include/libtorrent/torrent.hpp:1498
// this was the last time _we_ saw a seed in this swarm
std::time_t m_last_seen_complete = 0;
// this is the time last any of our peers saw a seed
// in this swarm
std::time_t m_swarm_last_seen_complete = 0;
// keep a copy if the info-hash here, so it can be accessed from multiple
// threads, and be cheap to access from the client
info_hash_t m_info_hash;
public:
// these are the lists this torrent belongs to. For more
// details about each list, see session_impl.hpp. Each list
// represents a group this torrent belongs to and makes it
// efficient to enumerate only torrents belonging to a specific
// group. Such as torrents that want peer connections or want
// to be ticked etc.
aux::array<link, aux::session_interface::num_torrent_lists, torrent_list_index_t>
m_links;
private:
// m_num_verified = m_verified.count()
std::uint32_t m_num_verified = 0;
// if this torrent is running, this was the time
// when it was started. This is used to have a
// bias towards keeping seeding torrents that
// recently was started, to avoid oscillation
// this is specified at a second granularity
time_point32 m_started = aux::time_now32();
// if we're a seed, this is the timestamp of when we became one
time_point32 m_became_seed = aux::time_now32();
// if we're finished, this is the timestamp of when we finished
time_point32 m_became_finished = aux::time_now32();
// when checking, this is the first piece we have not
// issued a hash job for
piece_index_t m_checking_piece{0};
// the number of pieces we completed the check of
piece_index_t m_num_checked_pieces{0};
// if the error occurred on a file, this is the index of that file
// there are a few special cases, when this is negative. See
// set_error()
| ||
relevance 3 | ../include/libtorrent/pe_crypto.hpp:72 | dh_key_exchange should probably move into its own file |
dh_key_exchange should probably move into its own file../include/libtorrent/pe_crypto.hpp:72
#include <list>
#include <array>
#include <cstdint>
namespace libtorrent {
namespace mp = boost::multiprecision;
using key_t = mp::number<mp::cpp_int_backend<768, 768, mp::unsigned_magnitude, mp::unchecked, void>>;
TORRENT_EXTRA_EXPORT std::array<char, 96> export_key(key_t const& k);
// RC4 state from libtomcrypt
struct rc4 {
int x;
int y;
aux::array<std::uint8_t, 256> buf;
};
class TORRENT_EXTRA_EXPORT dh_key_exchange
{
public:
dh_key_exchange();
// Get local public key
key_t const& get_local_key() const { return m_dh_local_key; }
// read remote_pubkey, generate and store shared secret in
// m_dh_shared_secret.
void compute_secret(std::uint8_t const* remote_pubkey);
void compute_secret(key_t const& remote_pubkey);
key_t const& get_secret() const { return m_dh_shared_secret; }
sha1_hash const& get_hash_xor_mask() const { return m_xor_mask; }
private:
key_t m_dh_local_key;
key_t m_dh_local_secret;
key_t m_dh_shared_secret;
sha1_hash m_xor_mask;
};
struct TORRENT_EXTRA_EXPORT encryption_handler
{
std::tuple<int, span<span<char const>>>
encrypt(span<span<char>> iovec);
int decrypt(aux::crypto_receive_buffer& recv_buffer
| ||
relevance 3 | ../include/libtorrent/web_peer_connection.hpp:119 | if we make this be a disk_buffer_holder instead we would save a copy use allocate_disk_receive_buffer and release_disk_receive_buffer |
if we make this be a disk_buffer_holder instead
we would save a copy
use allocate_disk_receive_buffer and release_disk_receive_buffer../include/libtorrent/web_peer_connection.hpp:119 piece_block_progress downloading_piece_progress() const override;
void handle_padfile();
// this has one entry per http-request
// (might be more than the bt requests)
struct file_request_t
{
file_index_t file_index;
int length;
std::int64_t start;
};
std::deque<file_request_t> m_file_requests;
std::string m_url;
web_seed_t* m_web;
// this is used for intermediate storage of pieces to be delivered to the
// bittorrent engine
aux::vector<char> m_piece;
// the number of bytes we've forwarded to the incoming_payload() function
// in the current HTTP response. used to know where in the buffer the
// next response starts
int m_received_body;
// this is the offset inside the current receive
// buffer where the next chunk header will be.
// this is updated for each chunk header that's
// parsed. It does not necessarily point to a valid
// offset in the receive buffer, if we haven't received
// it yet. This offset never includes the HTTP header
int m_chunk_pos;
// this is the number of bytes we've already received
// from the next chunk header we're waiting for
int m_partial_chunk_header;
// the number of responses we've received so far on
// this connection
int m_num_responses;
};
}
#endif // TORRENT_WEB_PEER_CONNECTION_HPP_INCLUDED
| ||
relevance 3 | ../include/libtorrent/torrent_handle.hpp:535 | unify url_seed and http_seed with just web_seed, using the web_seed_entry. |
unify url_seed and http_seed with just web_seed, using the
web_seed_entry.../include/libtorrent/torrent_handle.hpp:535 // one returned from ``trackers()`` and will replace it. If you want an
// immediate effect, you have to call force_reannounce(). See
// announce_entry.
//
// ``post_trackers()`` is the asynchronous version of ``trackers()``. It
// will trigger a tracker_list_alert to be posted.
//
// ``add_tracker()`` will look if the specified tracker is already in the
// set. If it is, it doesn't do anything. If it's not in the current set
// of trackers, it will insert it in the tier specified in the
// announce_entry.
//
// The updated set of trackers will be saved in the resume data, and when
// a torrent is started with resume data, the trackers from the resume
// data will replace the original ones.
std::vector<announce_entry> trackers() const;
void replace_trackers(std::vector<announce_entry> const&) const;
void add_tracker(announce_entry const&) const;
void post_trackers() const;
// ``add_url_seed()`` adds another url to the torrent's list of url
// seeds. If the given url already exists in that list, the call has no
// effect. The torrent will connect to the server and try to download
// pieces from it, unless it's paused, queued, checking or seeding.
// ``remove_url_seed()`` removes the given url if it exists already.
// ``url_seeds()`` return a set of the url seeds currently in this
// torrent. Note that URLs that fails may be removed automatically from
// the list.
//
// See http-seeding_ for more information.
void add_url_seed(std::string const& url) const;
void remove_url_seed(std::string const& url) const;
std::set<std::string> url_seeds() const;
// These functions are identical as the ``*_url_seed()`` variants, but
// they operate on `BEP 17`_ web seeds instead of `BEP 19`_.
//
// See http-seeding_ for more information.
void add_http_seed(std::string const& url) const;
void remove_http_seed(std::string const& url) const;
std::set<std::string> http_seeds() const;
// add the specified extension to this torrent. The ``ext`` argument is
// a function that will be called from within libtorrent's context
// passing in the internal torrent object and the specified userdata
// pointer. The function is expected to return a shared pointer to
// a torrent_plugin instance.
void add_extension(
std::function<std::shared_ptr<torrent_plugin>(torrent_handle const&, client_data_t)> const& ext
, client_data_t userdata = client_data_t{});
| ||
relevance 3 | ../include/libtorrent/stat.hpp:257 | everything but payload counters and rates could probably be removed from here |
everything but payload counters and rates could probably be
removed from here../include/libtorrent/stat.hpp:257 // peer_connection is opened and have some previous
// transfers from earlier connections.
void add_stat(std::int64_t downloaded, std::int64_t uploaded)
{
m_stat[download_payload].offset(downloaded);
m_stat[upload_payload].offset(uploaded);
}
int last_payload_downloaded() const
{ return m_stat[download_payload].counter(); }
int last_payload_uploaded() const
{ return m_stat[upload_payload].counter(); }
int last_protocol_downloaded() const
{ return m_stat[download_protocol].counter(); }
int last_protocol_uploaded() const
{ return m_stat[upload_protocol].counter(); }
// these are the channels we keep stats for
enum
{
upload_payload,
upload_protocol,
download_payload,
download_protocol,
upload_ip_protocol,
download_ip_protocol,
num_channels
};
void clear()
{
for (int i = 0; i < num_channels; ++i)
m_stat[i].clear();
}
stat_channel const& operator[](int i) const
{
TORRENT_ASSERT(i >= 0 && i < num_channels);
return m_stat[i];
}
private:
stat_channel m_stat[num_channels];
};
}
#endif // TORRENT_STAT_HPP_INCLUDED
| ||
relevance 3 | ../include/libtorrent/enum_net.hpp:164 | use string_view for device_name |
use string_view for device_name../include/libtorrent/enum_net.hpp:164 // return nullopt.
TORRENT_EXTRA_EXPORT boost::optional<address> get_gateway(
ip_interface const& iface, span<ip_route const> routes);
// returns whether there is a route to the specified device for any global
// internet address of the specified address family.
TORRENT_EXTRA_EXPORT bool has_internet_route(string_view device, int family
, span<ip_route const> routes);
// returns whether there are *any* routes to the internet in the routing
// table. This can be used to determine if the routing table is fully
// populated or not.
TORRENT_EXTRA_EXPORT bool has_any_internet_route(span<ip_route const> routes);
// attempt to bind socket to the device with the specified name. For systems
// that don't support SO_BINDTODEVICE the socket will be bound to one of the
// IP addresses of the specified device. In this case it is necessary to
// verify the local endpoint of the socket once the connection is established.
// the returned address is the ip the socket was bound to (or address_v4::any()
// in case SO_BINDTODEVICE succeeded and we don't need to verify it).
template <class Socket>
address bind_socket_to_device(io_context& ios, Socket& sock
, tcp const& protocol
, char const* device_name, int port, error_code& ec)
{
tcp::endpoint bind_ep(address_v4::any(), std::uint16_t(port));
address ip = make_address(device_name, ec);
if (!ec)
{
// this is to cover the case where "0.0.0.0" is considered any IPv4 or
// IPv6 address. If we're asking to be bound to an IPv6 address and
// providing 0.0.0.0 as the device, turn it into "::"
if (ip == address_v4::any() && protocol == boost::asio::ip::tcp::v6())
ip = address_v6::any();
bind_ep.address(ip);
// it appears to be an IP. Just bind to that address
sock.bind(bind_ep, ec);
return bind_ep.address();
}
ec.clear();
#if TORRENT_HAS_BINDTODEVICE
// try to use SO_BINDTODEVICE here, if that exists. If it fails,
// fall back to the mechanism we have below
aux::bind_device(sock, device_name, ec);
if (ec)
#endif
{
ec.clear();
| ||
relevance 3 | ../include/libtorrent/kademlia/routing_table.hpp:153 | to improve memory locality and scanning performance, turn the routing table into a single vector with boundaries for the nodes instead. Perhaps replacement nodes should be in a separate vector. |
to improve memory locality and scanning performance, turn the
routing table into a single vector with boundaries for the nodes instead.
Perhaps replacement nodes should be in a separate vector.../include/libtorrent/kademlia/routing_table.hpp:153// * Nodes are not marked as being stale, they keep a counter
// that tells how many times in a row they have failed. When
// a new node is to be inserted, the node that has failed
// the most times is replaced. If none of the nodes in the
// bucket has failed, then it is put in the replacement
// cache (just like in the paper).
// * The routing table bucket sizes are larger towards the "top" of the routing
// table. This is to get closer to the target in fewer round-trips.
// * Nodes with lower RTT are preferred and may replace nodes with higher RTT
// * Nodes that are "verified" (i.e. use a node-ID derived from their IP) are
// preferred and may replace nodes that are not verified.
TORRENT_EXTRA_EXPORT bool mostly_verified_nodes(bucket_t const&);
TORRENT_EXTRA_EXPORT bool compare_ip_cidr(address const& lhs, address const& rhs);
using find_nodes_flags_t = flags::bitfield_flag<std::uint8_t, struct find_nodes_flags_tag>;
class TORRENT_EXTRA_EXPORT routing_table
{
public:
using table_t = aux::vector<routing_table_node>;
routing_table(node_id const& id, udp proto
, int bucket_size
, aux::session_settings const& settings
, dht_logger* log);
routing_table(routing_table const&) = delete;
routing_table& operator=(routing_table const&) = delete;
#if TORRENT_ABI_VERSION == 1
#include "libtorrent/aux_/disable_deprecation_warnings_push.hpp"
void status(session_status& s) const;
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#endif
void status(std::vector<dht_routing_bucket>& s) const;
void node_failed(node_id const& id, udp::endpoint const& ep);
// adds an endpoint that will never be added to
// the routing table
void add_router_node(udp::endpoint const& router);
// iterates over the router nodes added
using router_iterator = std::set<udp::endpoint>::const_iterator;
router_iterator begin() const { return m_router_nodes.begin(); }
router_iterator end() const { return m_router_nodes.end(); }
enum add_node_status_t {
failed_to_add = 0,
| ||
relevance 3 | ../include/libtorrent/aux_/storage_utils.hpp:54 | remove this typedef, and use span for disk write operations |
remove this typedef, and use span for disk write
operations../include/libtorrent/aux_/storage_utils.hpp:54#ifndef TORRENT_STORAGE_UTILS_HPP_INCLUDE
#define TORRENT_STORAGE_UTILS_HPP_INCLUDE
#include <cstdint>
#include <string>
#include <functional>
#include "libtorrent/config.hpp"
#include "libtorrent/fwd.hpp"
#include "libtorrent/span.hpp"
#include "libtorrent/span.hpp"
#include "libtorrent/units.hpp"
#include "libtorrent/storage_defs.hpp" // for status_t
#include "libtorrent/session_types.hpp"
#include "libtorrent/error_code.hpp"
namespace libtorrent {
struct stat_cache;
using iovec_t = span<char>;
namespace aux {
// this is a read or write operation so that readwrite() knows
// what to do when it's actually touching the file
using fileop = std::function<int(file_index_t, std::int64_t, span<char>, storage_error&)>;
// this function is responsible for turning read and write operations in the
// torrent space (pieces) into read and write operations in the filesystem
// space (files on disk).
TORRENT_EXTRA_EXPORT int readwrite(file_storage const& files
, span<char> buf, piece_index_t piece, int offset
, storage_error& ec, fileop op);
// moves the files in file_storage f from ``save_path`` to
// ``destination_save_path`` according to the rules defined by ``flags``.
// returns the status code and the new save_path.
TORRENT_EXTRA_EXPORT std::pair<status_t, std::string>
move_storage(file_storage const& f
, std::string save_path
, std::string const& destination_save_path
, std::function<void(std::string const&, lt::error_code&)> const& move_partfile
, move_flags_t flags, storage_error& ec);
// deletes the files on fs from save_path according to options. Options may
// opt to only delete the partfile
TORRENT_EXTRA_EXPORT void
delete_files(file_storage const& fs, std::string const& save_path
, std::string const& part_file_name, remove_flags_t options, storage_error& ec);
| ||
relevance 2 | ../test/test_dht.cpp:1642 | test num_global_nodes |
test num_global_nodes../test/test_dht.cpp:1642 | ||
relevance 2 | ../test/test_dht.cpp:1643 | test need_refresh |
test need_refresh../test/test_dht.cpp:1643
s.set_bool(settings_pack::dht_restrict_routing_ips, false);
{
auto const ep = rand_udp_ep(rand_addr);
auto const id = generate_id_impl(ep.address(), 2);
table.node_seen(id, ep, 10);
}
nodes.clear();
for (int i = 0; i < 10000; ++i)
{
auto const ep = rand_udp_ep(rand_addr);
auto const id = generate_id_impl(ep.address(), 6);
table.node_seen(id, ep, 20 + (id[19] & 0xff));
}
std::printf("active buckets: %d\n", table.num_active_buckets());
TEST_CHECK(table.num_active_buckets() == 11
|| table.num_active_buckets() == 12);
TEST_CHECK(std::get<0>(table.size()) >= bucket_size * 10);
print_state(std::cout, table);
table.for_each_node(std::bind(node_push_back, &nodes, _1), nullptr);
std::printf("nodes: %d\n", int(nodes.size()));
{
node_id const id = generate_random_id();
std::vector<node_entry> temp = table.find_node(id, {}, int(nodes.size()) * 2);
std::printf("returned-all: %d\n", int(temp.size()));
TEST_EQUAL(temp.size(), nodes.size());
}
// This makes sure enough of the nodes returned are actually
// part of the closest nodes
std::set<node_id> duplicates;
const int reps = 50;
for (int r = 0; r < reps; ++r)
{
node_id const id = generate_random_id();
std::vector<node_entry> temp = table.find_node(id, {}, bucket_size * 2);
TEST_EQUAL(int(temp.size()), std::min(bucket_size * 2, int(nodes.size())));
std::sort(nodes.begin(), nodes.end(), std::bind(&compare_ref
, std::bind(&node_entry::id, _1)
, std::bind(&node_entry::id, _2), id));
| ||
relevance 2 | ../test/test_dht.cpp:2828 | split this up into smaller test cases |
split this up into smaller test cases../test/test_dht.cpp:2828
TEST_EQUAL(aux::to_hex(sig.bytes)
, "6834284b6b24c3204eb2fea824d82f88883a3d95e8b4a21b8c0ded553d17d17d"
"df9a8a7104b1258f30bed3787e6cb896fca78c58f8e03b5f18f14951a87d9a08");
sha1_hash target_id = item_target_id(test_salt, pk);
TEST_EQUAL(aux::to_hex(target_id), "411eba73b6f087ca51a3795d9c8c938d365e32c1");
}
TORRENT_TEST(signing_test3)
{
// test vector 3
// test content
span<char const> test_content("12:Hello World!", 15);
sha1_hash target_id = item_target_id(test_content);
TEST_EQUAL(aux::to_hex(target_id), "e5f96f6f38320f0f33959cb4d3d656452117aadb");
}
TORRENT_TEST(verify_message)
{
char error_string[200];
// test verify_message
static const key_desc_t msg_desc[] = {
{"A", bdecode_node::string_t, 4, 0},
{"B", bdecode_node::dict_t, 0, key_desc_t::optional | key_desc_t::parse_children},
{"B1", bdecode_node::string_t, 0, 0},
{"B2", bdecode_node::string_t, 0, key_desc_t::last_child},
{"C", bdecode_node::dict_t, 0, key_desc_t::optional | key_desc_t::parse_children},
{"C1", bdecode_node::string_t, 0, 0},
{"C2", bdecode_node::string_t, 0, key_desc_t::last_child},
};
bdecode_node msg_keys[7];
bdecode_node ent;
error_code ec;
char const test_msg[] = "d1:A4:test1:Bd2:B15:test22:B25:test3ee";
bdecode(test_msg, test_msg + sizeof(test_msg)-1, ent, ec);
std::printf("%s\n", print_entry(ent).c_str());
bool ret = verify_message(ent, msg_desc, msg_keys, error_string);
TEST_CHECK(ret);
TEST_CHECK(msg_keys[0]);
if (msg_keys[0]) TEST_EQUAL(msg_keys[0].string_value(), "test");
TEST_CHECK(msg_keys[1]);
TEST_CHECK(msg_keys[2]);
if (msg_keys[2]) TEST_EQUAL(msg_keys[2].string_value(), "test2");
| ||
relevance 2 | ../test/test_storage.cpp:639 | split this test up into smaller parts |
split this test up into smaller parts../test/test_storage.cpp:639 io->submit_jobs();
ios.restart();
run_until(ios, done);
TEST_EQUAL(oversized, bool(flags & test_oversized));
for (auto const i : info->piece_range())
{
done = false;
io->async_hash(st, i, {}
, disk_interface::sequential_access | disk_interface::volatile_read | disk_interface::v1_hash
, std::bind(&on_piece_checked, _1, _2, _3, &done));
io->submit_jobs();
ios.restart();
run_until(ios, done);
}
io->abort(true);
}
template <typename StorageType>
void run_test()
{
std::string const test_path = current_working_directory();
std::cout << "\n=== " << test_path << " ===\n" << std::endl;
std::shared_ptr<torrent_info> info;
std::vector<char> piece0 = new_piece(piece_size);
std::vector<char> piece1 = new_piece(piece_size);
std::vector<char> piece2 = new_piece(piece_size);
std::vector<char> piece3 = new_piece(piece_size);
delete_dirs("temp_storage");
file_storage fs;
fs.add_file("temp_storage/test1.tmp", 17);
fs.add_file("temp_storage/test2.tmp", 612);
fs.add_file("temp_storage/test3.tmp", 0);
fs.add_file("temp_storage/test4.tmp", 0);
fs.add_file("temp_storage/test5.tmp", 3253);
fs.add_file("temp_storage/test6.tmp", 841);
int const last_file_size = 4 * int(piece_size) - int(fs.total_size());
fs.add_file("temp_storage/test7.tmp", last_file_size);
// File layout
// +-+--+++-------+-------+----------------------------------------------------------------------------------------+
// |1| 2||| file5 | file6 | file7 |
// +-+--+++-------+-------+----------------------------------------------------------------------------------------+
// | | | | |
// | piece 0 | piece 1 | piece 2 | piece 3 |
| ||
relevance 2 | ../test/test_piece_picker.cpp:2825 | test picking with partial pieces and other peers present so that both backup_pieces and backup_pieces2 are used |
test picking with partial pieces and other peers present so that both
backup_pieces and backup_pieces2 are used../test/test_piece_picker.cpp:2825 | ||
relevance 2 | ../src/torrent.cpp:503 | post alert |
post alert../src/torrent.cpp:503 if (m_current_gauge_state != no_gauge_state)
inc_stats_counter(m_current_gauge_state + counters::num_checking_torrents, -1);
if (new_gauge_state != no_gauge_state)
inc_stats_counter(new_gauge_state + counters::num_checking_torrents, 1);
TORRENT_ASSERT(new_gauge_state >= 0);
TORRENT_ASSERT(new_gauge_state <= no_gauge_state);
m_current_gauge_state = static_cast<std::uint32_t>(new_gauge_state);
}
void torrent::leave_seed_mode(seed_mode_t const checking)
{
if (!m_seed_mode) return;
if (checking == seed_mode_t::check_files)
{
// this means the user promised we had all the
// files, but it turned out we didn't. This is
// an error.
#ifndef TORRENT_DISABLE_LOGGING
debug_log("*** FAILED SEED MODE, rechecking");
#endif
}
#ifndef TORRENT_DISABLE_LOGGING
debug_log("*** LEAVING SEED MODE (%s)"
, checking == seed_mode_t::skip_checking ? "as seed" : "as non-seed");
#endif
m_seed_mode = false;
// seed is false if we turned out not
// to be a seed after all
if (checking == seed_mode_t::check_files
&& state() != torrent_status::checking_resume_data)
{
m_have_all = false;
set_state(torrent_status::downloading);
force_recheck();
}
m_num_verified = 0;
m_verified.clear();
m_verifying.clear();
set_need_save_resume(torrent_handle::if_state_changed);
}
void torrent::verified(piece_index_t const piece)
{
TORRENT_ASSERT(!m_verified.get_bit(piece));
++m_num_verified;
| ||
relevance 2 | ../src/torrent.cpp:1842 | add a unit test where we don't have metadata, connect to a peer that sends a bitfield that's too large, then we get the metadata |
add a unit test where we don't have metadata, connect to a peer
that sends a bitfield that's too large, then we get the metadata../src/torrent.cpp:1842 for (auto const& f : m_add_torrent_params->renamed_files)
{
if (f.first < file_index_t(0) || f.first >= fs.end_file()) continue;
m_torrent_file->rename_file(file_index_t(f.first), f.second);
}
}
construct_storage();
#ifndef TORRENT_DISABLE_SHARE_MODE
if (m_share_mode && valid_metadata())
{
// in share mode, all pieces have their priorities initialized to 0
m_file_priority.clear();
m_file_priority.resize(m_torrent_file->num_files(), dont_download);
}
#endif
// it's important to initialize the peers early, because this is what will
// fix up their have-bitmasks to have the correct size
if (!m_connections_initialized)
{
m_connections_initialized = true;
// all peer connections have to initialize themselves now that the metadata
// is available
// copy the peer list since peers may disconnect and invalidate
// m_connections as we initialize them
for (auto c : m_connections)
{
auto pc = c->self();
if (pc->is_disconnecting()) continue;
pc->on_metadata_impl();
if (pc->is_disconnecting()) continue;
pc->init();
}
}
// in case file priorities were passed in via the add_torrent_params
// and also in the case of share mode, we need to update the priorities
// this has to be applied before piece priority
if (!m_file_priority.empty())
{
// m_file_priority was loaded from the resume data, this doesn't
// alter any state that needs to be saved in the resume data
auto const ns = m_need_save_resume_data;
update_piece_priorities(m_file_priority);
m_need_save_resume_data = ns;
}
if (m_add_torrent_params)
{
| ||
relevance 2 | ../src/torrent.cpp:4473 | use chrono type for time duration |
use chrono type for time duration../src/torrent.cpp:4473 if (trust_points > 8) trust_points = 8;
p->trust_points = trust_points;
if (p->connection)
{
auto* peer = static_cast<peer_connection*>(p->connection);
TORRENT_ASSERT(peer->m_in_use == 1337);
peer->received_valid_data(index);
}
}
m_picker->piece_passed(index);
update_gauge();
we_have(index);
}
#ifndef TORRENT_DISABLE_PREDICTIVE_PIECES
// we believe we will complete this piece very soon
// announce it to peers ahead of time to eliminate the
// round-trip times involved in announcing it, requesting it
// and sending it
void torrent::predicted_have_piece(piece_index_t const index, int const milliseconds)
{
auto const i = std::lower_bound(m_predictive_pieces.begin()
, m_predictive_pieces.end(), index);
if (i != m_predictive_pieces.end() && *i == index) return;
for (auto p : m_connections)
{
TORRENT_INCREMENT(m_iterating_connections);
#ifndef TORRENT_DISABLE_LOGGING
p->peer_log(peer_log_alert::outgoing, "PREDICTIVE_HAVE", "piece: %d expected in %d ms"
, static_cast<int>(index), milliseconds);
#else
TORRENT_UNUSED(milliseconds);
#endif
p->announce_piece(index);
}
m_predictive_pieces.insert(i, index);
}
#endif
// blocks may contain the block indices of the blocks that failed (if this is
// a v2 torrent).
void torrent::piece_failed(piece_index_t const index, std::vector<int> blocks)
{
// if the last piece fails the peer connection will still
// think that it has received all of it until this function
// resets the download queue. So, we cannot do the
// invariant check here since it assumes:
// (total_done == m_torrent_file->total_size()) => is_seed()
| ||
relevance 2 | ../src/torrent.cpp:4900 | abort lookups this torrent has made via the session host resolver interface |
abort lookups this torrent has made via the
session host resolver interface../src/torrent.cpp:4900 TORRENT_UNUSED(e);
m_storage.reset();
#ifndef TORRENT_DISABLE_LOGGING
debug_log("Failed to flush disk cache: %s", e.what());
#endif
// clients may rely on this alert to be posted, so it's probably a
// good idea to post it here, even though we failed
if (alerts().should_post<cache_flushed_alert>())
alerts().emplace_alert<cache_flushed_alert>(get_handle());
}
m_ses.deferred_submit_jobs();
}
else
{
if (alerts().should_post<cache_flushed_alert>())
alerts().emplace_alert<cache_flushed_alert>(get_handle());
alerts().emplace_alert<torrent_removed_alert>(get_handle()
, info_hash(), get_userdata());
}
if (!m_apply_ip_filter)
{
inc_stats_counter(counters::non_filter_torrents, -1);
m_apply_ip_filter = true;
}
m_paused = false;
m_auto_managed = false;
update_state_list();
for (torrent_list_index_t i{}; i != m_links.end_index(); ++i)
{
if (!m_links[i].in_list()) continue;
m_links[i].unlink(m_ses.torrent_list(i), i);
}
// don't re-add this torrent to the state-update list
m_state_subscription = false;
}
// this is called when we're destructing non-gracefully. i.e. we're _just_
// destructing everything.
void torrent::panic()
{
m_storage.reset();
// if there are any other peers allocated still, we need to clear them
// now. They can't be cleared later because the allocator will already
// have been destructed
if (m_peer_list) m_peer_list->clear();
m_connections.clear();
m_outgoing_pids.clear();
m_peers_to_disconnect.clear();
| ||
relevance 2 | ../src/torrent.cpp:8008 | if peer is a really good peer, maybe we shouldn't disconnect it perhaps this logic should be disabled if we have too many idle peers (with some definition of idle) |
if peer is a really good peer, maybe we shouldn't disconnect it
perhaps this logic should be disabled if we have too many idle peers
(with some definition of idle)../src/torrent.cpp:8008
m_peers_to_disconnect.reserve(m_connections.size() + 1);
m_connections.reserve(m_connections.size() + 1);
#if TORRENT_USE_ASSERTS
error_code ec;
TORRENT_ASSERT(p->remote() == p->get_socket().remote_endpoint(ec) || ec);
#endif
TORRENT_ASSERT(p->peer_info_struct() != nullptr);
// we need to do this after we've added the peer to the peer_list
// since that's when the peer is assigned its peer_info object,
// which holds the rank
if (maybe_replace_peer)
{
// now, find the lowest rank peer and disconnect that
// if it's lower rank than the incoming connection
peer_connection* peer = find_lowest_ranking_peer();
if (peer != nullptr && peer->peer_rank() < p->peer_rank())
{
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
debug_log("CLOSING CONNECTION \"%s\" peer list full (low peer rank) "
"connections: %d limit: %d"
, print_endpoint(peer->remote()).c_str()
, num_peers()
, m_max_connections);
}
#endif
peer->disconnect(errors::too_many_connections, operation_t::bittorrent);
p->peer_disconnected_other();
}
else
{
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
debug_log("CLOSING CONNECTION \"%s\" peer list full (low peer rank) "
"connections: %d limit: %d"
, print_endpoint(p->remote()).c_str()
, num_peers()
, m_max_connections);
}
#endif
p->disconnect(errors::too_many_connections, operation_t::bittorrent);
// we have to do this here because from the peer's point of view
// it wasn't really attached to the torrent, but we do need
// to let peer_list know we're removing it
| ||
relevance 2 | ../src/bdecode.cpp:826 | attempt to simplify this implementation by embracing the span |
attempt to simplify this implementation by embracing the span../src/bdecode.cpp:826 }
bdecode_node bdecode(span<char const> buffer
, error_code& ec, int* error_pos, int depth_limit, int token_limit)
{
bdecode_node ret;
ec.clear();
if (buffer.size() > bdecode_token::max_offset)
{
if (error_pos) *error_pos = 0;
ec = bdecode_errors::limit_exceeded;
return ret;
}
// this is the stack of bdecode_token indices, into m_tokens.
// sp is the stack pointer, as index into the array, stack
int sp = 0;
TORRENT_ALLOCA(stack, stack_frame, depth_limit);
char const* start = buffer.data();
char const* end = start + buffer.size();
char const* const orig_start = start;
if (start == end)
TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
while (start <= end)
{
if (start >= end) TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
if (sp >= depth_limit)
TORRENT_FAIL_BDECODE(bdecode_errors::depth_exceeded);
--token_limit;
if (token_limit < 0)
TORRENT_FAIL_BDECODE(bdecode_errors::limit_exceeded);
// look for a new token
char const t = *start;
int const current_frame = sp;
// if we're currently parsing a dictionary, assert that
// every other node is a string.
if (current_frame > 0
&& ret.m_tokens[stack[current_frame - 1].token].type == bdecode_token::dict)
{
if (stack[current_frame - 1].state == 0)
{
// the current parent is a dict and we are parsing a key.
| ||
relevance 2 | ../src/web_peer_connection.cpp:626 | just make this peer not have the pieces associated with the file we just requested. Only when it doesn't have any of the file do the following pad files will make it complicated |
just make this peer not have the pieces
associated with the file we just requested. Only
when it doesn't have any of the file, do the following.
pad files will make it complicated../src/web_peer_connection.cpp:626
peer_connection::received_invalid_data(index, single_peer);
// if we don't think we have any of the files, allow banning the web seed
if (num_have_pieces() == 0) return true;
// don't disconnect, we won't request anything from this file again
return false;
}
void web_peer_connection::on_receive_padfile()
{
handle_padfile();
}
void web_peer_connection::handle_error(int const bytes_left)
{
std::shared_ptr<torrent> t = associated_torrent().lock();
TORRENT_ASSERT(t);
// temporarily unavailable, retry later
t->retry_web_seed(this, m_parser.header_duration("retry-after"));
if (t->alerts().should_post<url_seed_alert>())
{
std::string const error_msg = to_string(m_parser.status_code()).data()
+ (" " + m_parser.message());
t->alerts().emplace_alert<url_seed_alert>(t->get_handle(), m_url
, error_msg);
}
received_bytes(0, bytes_left);
disconnect(error_code(m_parser.status_code(), http_category()), operation_t::bittorrent, failure);
}
void web_peer_connection::disable(error_code const& ec)
{
// we should not try this server again.
m_web->disabled = true;
disconnect(ec, operation_t::bittorrent, peer_error);
if (m_web->ephemeral)
{
std::shared_ptr<torrent> t = associated_torrent().lock();
TORRENT_ASSERT(t);
t->remove_web_seed_conn(this);
}
m_web = nullptr;
TORRENT_ASSERT(is_disconnecting());
}
void web_peer_connection::handle_redirect(int const bytes_left)
{
| ||
relevance 2 | ../src/peer_connection.cpp:2548 | this should probably be based on time instead of number of request messages. For a very high throughput connection, 300 may be a legitimate number of requests to have in flight when getting choked |
this should probably be based on time instead of number
of request messages. For a very high throughput connection, 300
may be a legitimate number of requests to have in flight when
getting choked../src/peer_connection.cpp:2548 }
#endif
write_reject_request(r);
if (m_num_invalid_requests < std::numeric_limits<decltype(m_num_invalid_requests)>::max())
++m_num_invalid_requests;
if (t->alerts().should_post<invalid_request_alert>())
{
// msvc 12 appears to deduce the rvalue reference template
// incorrectly for bool temporaries. So, create a dummy instance
bool const peer_interested = bool(m_peer_interested);
t->alerts().emplace_alert<invalid_request_alert>(
t->get_handle(), m_remote, m_peer_id, r
, t->has_piece_passed(r.piece), peer_interested, false);
}
// every ten invalid request, remind the peer that it's choked
if (!m_peer_interested && m_num_invalid_requests % 10 == 0 && m_choked)
{
if (m_num_invalid_requests > 300 && !m_peer_choked
&& can_disconnect(errors::too_many_requests_when_choked))
{
disconnect(errors::too_many_requests_when_choked, operation_t::bittorrent, peer_error);
return;
}
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::outgoing_message, "CHOKE");
#endif
write_choke();
}
return;
}
// if we have choked the client
// ignore the request
int const blocks_per_piece =
(ti.piece_length() + t->block_size() - 1) / t->block_size();
// disconnect peers that downloads more than foo times an allowed
// fast piece
if (m_choked && fast_idx != -1 && m_accept_fast_piece_cnt[fast_idx] >= 3 * blocks_per_piece
&& can_disconnect(errors::too_many_requests_when_choked))
{
disconnect(errors::too_many_requests_when_choked, operation_t::bittorrent, peer_error);
return;
}
if (m_choked && fast_idx == -1)
{
| ||
relevance 2 | ../src/peer_connection.cpp:3293 | since we throw away the queue entry once we issue the disk job, this may happen. Instead, we should keep the queue entry around, mark it as having been requested from disk and once the disk job comes back, discard it if it has been cancelled. Maybe even be able to cancel disk jobs? |
since we throw away the queue entry once we issue
the disk job, this may happen. Instead, we should keep the
queue entry around, mark it as having been requested from
disk and once the disk job comes back, discard it if it has
been cancelled. Maybe even be able to cancel disk jobs?../src/peer_connection.cpp:3293
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::incoming_message, "CANCEL"
, "piece: %d s: %x l: %x", static_cast<int>(r.piece), r.start, r.length);
#endif
auto const i = std::find(m_requests.begin(), m_requests.end(), r);
if (i != m_requests.end())
{
m_counters.inc_stats_counter(counters::cancelled_piece_requests);
m_requests.erase(i);
if (m_requests.empty())
m_counters.inc_stats_counter(counters::num_peers_up_requests, -1);
write_reject_request(r);
}
else
{
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "INVALID_CANCEL", "got cancel not in the queue");
#endif
}
}
// -----------------------------
// --------- DHT PORT ----------
// -----------------------------
void peer_connection::incoming_dht_port(int const listen_port)
{
TORRENT_ASSERT(is_single_thread());
INVARIANT_CHECK;
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::incoming_message, "DHT_PORT", "p: %d", listen_port);
#endif
#ifndef TORRENT_DISABLE_DHT
m_ses.add_dht_node({m_remote.address(), std::uint16_t(listen_port)});
#else
TORRENT_UNUSED(listen_port);
#endif
}
// -----------------------------
// --------- HAVE ALL ----------
// -----------------------------
void peer_connection::incoming_have_all()
{
| ||
relevance 2 | ../src/peer_connection.cpp:4958 | use a deadline_timer for timeouts. Don't rely on second_tick()! Hook this up to connect timeout as well. This would improve performance because of less work in second_tick(), and might let us remove ticking entirely eventually |
use a deadline_timer for timeouts. Don't rely on second_tick()!
Hook this up to connect timeout as well. This would improve performance
because of less work in second_tick(), and might let us remove ticking
entirely eventually../src/peer_connection.cpp:4958 if (d > seconds(connect_timeout)
&& can_disconnect(errors::timed_out))
{
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "CONNECT_FAILED", "waited %d seconds"
, int(total_seconds(d)));
#endif
connect_failed(errors::timed_out);
return;
}
}
// if the bw_network flag isn't set, it means we are not even trying to
// read from this peer's socket. Most likely because we're applying a
// rate limit. If the peer is "slow" because we are rate limiting it,
// don't enforce timeouts. However, as soon as we *do* read from the
// socket, we expect to receive data, and not have timed out. Then we
// can enforce the timeouts.
bool const reading_socket = bool(m_channel_state[download_channel] & peer_info::bw_network);
if (reading_socket && d > seconds(timeout()) && !m_connecting && m_reading_bytes == 0
&& can_disconnect(errors::timed_out_inactivity))
{
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "LAST_ACTIVITY", "%d seconds ago"
, int(total_seconds(d)));
#endif
disconnect(errors::timed_out_inactivity, operation_t::bittorrent);
return;
}
// do not stall waiting for a handshake
int timeout = m_settings.get_int (settings_pack::handshake_timeout);
#if TORRENT_USE_I2P
timeout *= is_i2p(m_socket) ? 4 : 1;
#endif
if (reading_socket
&& !m_connecting
&& in_handshake()
&& d > seconds(timeout))
{
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "NO_HANDSHAKE", "waited %d seconds"
, int(total_seconds(d)));
#endif
disconnect(errors::timed_out_no_handshake, operation_t::bittorrent);
return;
}
// disconnect peers that we unchoked, but they didn't send a request in
// the last 60 seconds, and we haven't been working on servicing a request
| ||
relevance 2 | ../src/alert_manager.cpp:80 | keep a count of the number of threads waiting. Only if it's > 0 notify them |
keep a count of the number of threads waiting. Only if it's
> 0 notify them../src/alert_manager.cpp:80 return m_alerts[m_generation].front();
// this call can be interrupted prematurely by other signals
m_condition.wait_for(lock, max_wait);
if (!m_alerts[m_generation].empty())
return m_alerts[m_generation].front();
return nullptr;
}
void alert_manager::maybe_notify(alert* a)
{
if (m_alerts[m_generation].size() == 1)
{
// we just posted to an empty queue. If anyone is waiting for
// alerts, we need to notify them. Also (potentially) call the
// user supplied m_notify callback to let the client wake up its
// message loop to poll for alerts.
if (m_notify) m_notify();
m_condition.notify_all();
}
#ifndef TORRENT_DISABLE_EXTENSIONS
for (auto& e : m_ses_extensions)
e->on_alert(a);
#else
TORRENT_UNUSED(a);
#endif
}
void alert_manager::set_notify_function(std::function<void()> const& fun)
{
std::unique_lock<std::recursive_mutex> lock(m_mutex);
m_notify = fun;
if (!m_alerts[m_generation].empty())
{
if (m_notify) m_notify();
}
}
#ifndef TORRENT_DISABLE_EXTENSIONS
void alert_manager::add_extension(std::shared_ptr<plugin> ext)
{
m_ses_extensions.push_back(ext);
}
#endif
void alert_manager::get_all(std::vector<alert*>& alerts)
{
std::lock_guard<std::recursive_mutex> lock(m_mutex);
| ||
relevance 2 | ../src/peer_list.cpp:539 | it would be nice if there was a way to iterate over these torrent_peer objects in the order they are allocated in the pool instead. It would probably be more efficient |
it would be nice if there was a way to iterate over these
torrent_peer objects in the order they are allocated in the pool
instead. It would probably be more efficient../src/peer_list.cpp:539 , int session_time, torrent_state* state)
{
TORRENT_ASSERT(is_single_thread());
INVARIANT_CHECK;
const int candidate_count = 10;
peers.reserve(candidate_count);
int erase_candidate = -1;
if (bool(m_finished) != state->is_finished)
recalculate_connect_candidates(state);
external_ip const& external = state->ip;
int external_port = state->port;
if (m_round_robin >= int(m_peers.size())) m_round_robin = 0;
int max_peerlist_size = state->max_peerlist_size;
for (int iterations = std::min(int(m_peers.size()), 300);
iterations > 0; --iterations)
{
++state->loop_counter;
if (m_round_robin >= int(m_peers.size())) m_round_robin = 0;
torrent_peer& pe = *m_peers[m_round_robin];
TORRENT_ASSERT(pe.in_use);
int current = m_round_robin;
// if the number of peers is growing large
// we need to start weeding.
if (int(m_peers.size()) >= max_peerlist_size * 0.95
&& max_peerlist_size > 0)
{
if (is_erase_candidate(pe)
&& (erase_candidate == -1
|| !compare_peer_erase(*m_peers[erase_candidate], pe)))
{
if (should_erase_immediately(pe))
{
if (erase_candidate > current) --erase_candidate;
erase_peer(m_peers.begin() + current, state);
continue;
}
else
{
erase_candidate = current;
}
| ||
relevance 2 | ../src/instantiate_connection.cpp:44 | peer_connection and tracker_connection should probably be flags |
peer_connection and tracker_connection should probably be flags../src/instantiate_connection.cpp:44ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "libtorrent/socket.hpp"
#include "libtorrent/aux_/socket_type.hpp"
#include "libtorrent/aux_/utp_socket_manager.hpp"
#include "libtorrent/aux_/instantiate_connection.hpp"
#include "libtorrent/aux_/utp_stream.hpp"
#include "libtorrent/ssl_stream.hpp"
namespace libtorrent { namespace aux {
aux::socket_type instantiate_connection(io_context& ios
, aux::proxy_settings const& ps
, void* ssl_context
, utp_socket_manager* sm
, bool peer_connection
, bool tracker_connection)
{
#if !TORRENT_USE_SSL
TORRENT_UNUSED(ssl_context);
#endif
if (sm)
{
#if TORRENT_USE_SSL
if (ssl_context)
{
ssl_stream<utp_stream> s(ios, *static_cast<ssl::context*>(ssl_context));
s.next_layer().set_impl(sm->new_utp_socket(&s.next_layer()));
return socket_type(std::move(s));
}
else
#endif
{
utp_stream s(ios);
s.set_impl(sm->new_utp_socket(&s));
return socket_type(std::move(s));
}
}
#if TORRENT_USE_I2P
else if (ps.type == settings_pack::i2p_proxy)
{
| ||
relevance 2 | ../src/alert.cpp:2021 | the salt here is allocated on the heap. It would be nice to allocate in the stack_allocator |
the salt here is allocated on the heap. It would be nice to
allocate in the stack_allocator../src/alert.cpp:2021 }
dht_immutable_item_alert::dht_immutable_item_alert(aux::stack_allocator&
, sha1_hash const& t, entry i)
: target(t), item(std::move(i))
{}
std::string dht_immutable_item_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
#else
char msg[1050];
std::snprintf(msg, sizeof(msg), "DHT immutable item %s [ %s ]"
, aux::to_hex(target).c_str()
, item.to_string().c_str());
return msg;
#endif
}
dht_mutable_item_alert::dht_mutable_item_alert(aux::stack_allocator&
, std::array<char, 32> const& k
, std::array<char, 64> const& sig
, std::int64_t sequence
, string_view s
, entry i
, bool a)
: key(k), signature(sig), seq(sequence), salt(s), item(std::move(i)), authoritative(a)
{}
std::string dht_mutable_item_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
#else
char msg[1050];
std::snprintf(msg, sizeof(msg), "DHT mutable item (key=%s salt=%s seq=%" PRId64 " %s) [ %s ]"
, aux::to_hex(key).c_str()
, salt.c_str()
, seq
, authoritative ? "auth" : "non-auth"
, item.to_string().c_str());
return msg;
#endif
}
dht_put_alert::dht_put_alert(aux::stack_allocator&, sha1_hash const& t, int n)
: target(t)
, public_key()
, signature()
, salt()
| ||
relevance 2 | ../src/udp_tracker_connection.cpp:81 | support authentication here. tracker_req().auth |
support authentication here. tracker_req().auth../src/udp_tracker_connection.cpp:81 udp_tracker_connection::m_connection_cache;
std::mutex udp_tracker_connection::m_cache_mutex;
udp_tracker_connection::udp_tracker_connection(
io_context& ios
, tracker_manager& man
, tracker_request const& req
, std::weak_ptr<request_callback> c)
: tracker_connection(man, req, ios, std::move(c))
, m_transaction_id(0)
, m_attempts(0)
, m_state(action_t::error)
, m_abort(false)
{
update_transaction_id();
}
void udp_tracker_connection::start()
{
std::string hostname;
std::string protocol;
int port;
error_code ec;
std::tie(protocol, std::ignore, hostname, port, std::ignore)
= parse_url_components(tracker_req().url, ec);
if (port == -1) port = protocol == "http" ? 80 : 443;
if (ec)
{
tracker_connection::fail(ec, operation_t::parse_address);
return;
}
aux::session_settings const& settings = m_man.settings();
int const proxy_type = settings.get_int(settings_pack::proxy_type);
if (settings.get_bool(settings_pack::proxy_hostnames)
&& (proxy_type == settings_pack::socks5
|| proxy_type == settings_pack::socks5_pw))
{
m_hostname = hostname;
m_target.port(std::uint16_t(port));
start_announce();
}
else
{
using namespace std::placeholders;
ADD_OUTSTANDING_ASYNC("udp_tracker_connection::name_lookup");
| ||
relevance 2 | ../src/upnp.cpp:106 | use boost::asio::ip::network instead of netmask |
use boost::asio::ip::network instead of netmask../src/upnp.cpp:106
static error_code ignore_error;
upnp::rootdevice::rootdevice() = default;
#if TORRENT_USE_ASSERTS
upnp::rootdevice::~rootdevice()
{
TORRENT_ASSERT(magic == 1337);
magic = 0;
}
#else
upnp::rootdevice::~rootdevice() = default;
#endif
upnp::rootdevice::rootdevice(rootdevice const&) = default;
upnp::rootdevice& upnp::rootdevice::operator=(rootdevice const&) & = default;
upnp::rootdevice::rootdevice(rootdevice&&) noexcept = default;
upnp::rootdevice& upnp::rootdevice::operator=(rootdevice&&) & = default;
upnp::upnp(io_context& ios
, aux::session_settings const& settings
, aux::portmap_callback& cb
, address_v4 const listen_address
, address_v4 const netmask
, std::string listen_device
, listen_socket_handle ls)
: m_settings(settings)
, m_callback(cb)
, m_io_service(ios)
, m_resolver(ios)
, m_multicast(ios)
, m_unicast(ios)
, m_broadcast_timer(ios)
, m_refresh_timer(ios)
, m_map_timer(ios)
, m_listen_address(listen_address)
, m_netmask(netmask)
, m_device(std::move(listen_device))
#if TORRENT_USE_SSL
, m_ssl_ctx(ssl::context::sslv23_client)
#endif
, m_listen_handle(std::move(ls))
{
#if TORRENT_USE_SSL
m_ssl_ctx.set_verify_mode(ssl::context::verify_none);
#endif
}
void upnp::start()
{
| ||
relevance 2 | ../src/tracker_manager.cpp:369 | implement |
implement../src/tracker_manager.cpp:369#ifndef TORRENT_DISABLE_LOGGING
if (m_ses.should_log())
{
m_ses.session_log("incoming UDP tracker packet from %s has invalid "
"transaction ID (%x)", print_endpoint(ep).c_str()
, transaction);
}
#endif
return false;
}
std::shared_ptr<udp_tracker_connection> const p = i->second;
// on_receive() may remove the tracker connection from the list
return p->on_receive(ep, buf);
}
void tracker_manager::incoming_error(error_code const&
, udp::endpoint const&)
{
TORRENT_ASSERT(is_single_thread());
}
bool tracker_manager::incoming_packet(string_view const hostname
, span<char const> const buf)
{
TORRENT_ASSERT(is_single_thread());
// ignore packets smaller than 16 bytes
if (buf.size() < 16) return false;
// the first word is the action, if it's not [0, 3]
// it's not a valid udp tracker response
span<const char> ptr = buf;
std::uint32_t const action = aux::read_uint32(ptr);
if (action > 3) return false;
std::uint32_t const transaction = aux::read_uint32(ptr);
auto const i = m_udp_conns.find(transaction);
if (i == m_udp_conns.end())
{
#ifndef TORRENT_DISABLE_LOGGING
// now, this may not have been meant to be a tracker response,
// but chances are pretty good, so it's probably worth logging
m_ses.session_log("incoming UDP tracker packet from %s has invalid "
"transaction ID (%x)", std::string(hostname).c_str(), int(transaction));
#endif
return false;
}
std::shared_ptr<udp_tracker_connection> const p = i->second;
// on_receive() may remove the tracker connection from the list
| ||
relevance 2 | ../src/escape_string.cpp:194 | this should probably be moved into string_util.cpp |
this should probably be moved into string_util.cpp../src/escape_string.cpp:194 {
return escape_string_impl(str.data(), int(str.size()), 10);
}
bool need_encoding(char const* str, int const len)
{
for (int i = 0; i < len; ++i)
{
if (std::strchr(unreserved_chars, *str) == nullptr || *str == 0)
return true;
++str;
}
return false;
}
void convert_path_to_posix(std::string& path)
{
std::replace(path.begin(), path.end(), '\\', '/');
}
std::string read_until(char const*& str, char const delim, char const* end)
{
TORRENT_ASSERT(str <= end);
std::string ret;
while (str != end && *str != delim)
{
ret += *str;
++str;
}
// skip the delimiter as well
while (str != end && *str == delim) ++str;
return ret;
}
std::string maybe_url_encode(std::string const& url)
{
std::string protocol, host, auth, path;
int port;
error_code ec;
std::tie(protocol, auth, host, port, path) = parse_url_components(url, ec);
if (ec) return url;
// first figure out if this url contains unencoded characters
if (!need_encoding(path.c_str(), int(path.size())))
return url;
std::string msg;
std::string escaped_path { escape_path(path) };
// reserve enough space so further append will
// only copy values to existing location
| ||
relevance 2 | ../src/path.cpp:429 | test this on a FAT volume to see what error we get! |
test this on a FAT volume to see what error we get!../src/path.cpp:429 // it's possible CreateHardLink will copy the file internally too,
// if the filesystem does not support it.
ec.assign(GetLastError(), system_category());
return;
}
// fall back to making a copy
#endif
#else
// assume posix's link() function exists
int ret = ::link(n_exist.c_str(), n_link.c_str());
if (ret == 0)
{
ec.clear();
return;
}
// most errors are passed through, except for the ones that indicate that
// hard links are not supported and require a copy.
if (errno != EMLINK
&& errno != EXDEV
#ifdef TORRENT_BEOS
// haiku returns EPERM when the filesystem doesn't support hard link
&& errno != EPERM
#endif
)
{
// some error happened, report up to the caller
ec.assign(errno, system_category());
return;
}
// fall back to making a copy
#endif
// if we get here, we should copy the file
storage_error se;
aux::copy_file(file, link, se);
ec = se.ec;
}
bool is_directory(std::string const& f, error_code& ec)
{
ec.clear();
error_code e;
file_status s;
stat_file(f, &s, e);
if (!e && s.mode & file_status::directory) return true;
ec = e;
| ||
relevance 2 | ../src/storage_utils.cpp:294 | technically, this is where the transaction of moving the files is completed. This is where the new save_path should be committed. If there is an error in the code below, that should not prevent the new save path to be set. Maybe it would make sense to make the save_path an in-out parameter |
technically, this is where the transaction of moving the files
is completed. This is where the new save_path should be committed. If
there is an error in the code below, that should not prevent the new
save path to be set. Maybe it would make sense to make the save_path
an in-out parameter../src/storage_utils.cpp:294 while (--file_index >= file_index_t(0))
{
// files moved out to absolute paths are not moved
if (f.file_absolute_path(file_index)) continue;
// if we ended up copying the file, don't do anything during
// roll-back
if (copied_files[file_index]) continue;
std::string const old_path = combine_path(save_path, f.file_path(file_index));
std::string const new_path = combine_path(new_save_path, f.file_path(file_index));
// ignore errors when rolling back
storage_error ignore;
move_file(new_path, old_path, ignore);
}
return { status_t::fatal_disk_error, save_path };
}
std::set<std::string> subdirs;
for (auto const i : f.file_range())
{
// files moved out to absolute paths are not moved
if (f.file_absolute_path(i)) continue;
if (has_parent_path(f.file_path(i)))
subdirs.insert(parent_path(f.file_path(i)));
// if we ended up renaming the file instead of moving it, there's no
// need to delete the source.
if (copied_files[i] == false) continue;
std::string const old_path = combine_path(save_path, f.file_path(i));
// we may still have some files in old save_path
// eg. if (flags == dont_replace && exists(new_path))
// ignore errors when removing
error_code ignore;
remove(old_path, ignore);
}
for (std::string const& s : subdirs)
{
error_code err;
std::string subdir = combine_path(save_path, s);
while (!path_equal(subdir, save_path) && !err)
{
remove(subdir, err);
| ||
relevance 2 | ../src/storage_utils.cpp:486 | is this risky? The upper layer will assume we have the whole file. Perhaps we should verify that at least the size of the file is correct |
is this risky? The upper layer will assume we have the
whole file. Perhaps we should verify that at least the size
of the file is correct../src/storage_utils.cpp:486 {
// we create directories lazily, so it's possible it hasn't
// been created yet. Create the directories now and try
// again
create_directories(parent_path(file_path), err);
if (err)
{
ec.file(idx);
ec.operation = operation_t::mkdir;
return false;
}
hard_link(s, file_path, err);
}
// if the file already exists, that's not an error
if (err == boost::system::errc::file_exists)
continue;
if (err)
{
ec.ec = err;
ec.file(idx);
ec.operation = operation_t::file_hard_link;
return false;
}
added_files = true;
stat.set_dirty(idx);
}
}
#endif // TORRENT_DISABLE_MUTABLE_TORRENTS
bool const seed = (rd.have_pieces.size() >= fs.num_pieces()
&& rd.have_pieces.all_set())
|| (rd.flags & torrent_flags::seed_mode);
if (seed)
{
for (file_index_t const file_index : fs.file_range())
{
if (fs.pad_file_at(file_index)) continue;
// files with priority zero may not have been saved to disk at their
// expected location, but is likely to be in a partfile. Just exempt it
// from checking
if (file_index < file_priority.end_index()
&& file_priority[file_index] == dont_download
&& !(rd.flags & torrent_flags::seed_mode))
continue;
| ||
relevance 2 | ../src/http_tracker_connection.cpp:479 | returning a bool here is redundant. Instead this function should return the peer_entry |
returning a bool here is redundant. Instead this function should
return the peer_entry../src/http_tracker_connection.cpp:479 {
cb->tracker_scrape_response(tracker_req(), resp.complete
, resp.incomplete, resp.downloaded, resp.downloaders);
}
else
{
std::list<address> ip_list;
if (m_tracker_connection)
{
for (auto const& endp : m_tracker_connection->endpoints())
{
ip_list.push_back(endp.address());
}
}
cb->tracker_response(tracker_req(), m_tracker_ip, ip_list, resp);
}
close();
}
bool extract_peer_info(bdecode_node const& info, peer_entry& ret, error_code& ec)
{
// extract peer id (if any)
if (info.type() != bdecode_node::dict_t)
{
ec = errors::invalid_peer_dict;
return false;
}
bdecode_node i = info.dict_find_string("peer id");
if (i && i.string_length() == 20)
{
std::copy(i.string_ptr(), i.string_ptr() + 20, ret.pid.begin());
}
else
{
// if there's no peer_id, just initialize it to a bunch of zeroes
ret.pid.clear();
}
// extract ip
i = info.dict_find_string("ip");
if (!i)
{
ec = errors::invalid_tracker_response;
return false;
}
ret.hostname = i.string_value().to_string();
// extract port
i = info.dict_find_int("port");
if (!i)
| ||
relevance 2 | ../src/piece_picker.cpp:2003 | make the 2048 limit configurable |
make the 2048 limit configurable../src/piece_picker.cpp:2003 // indicating which path through the picker we took to arrive at the
// returned block picks.
picker_flags_t piece_picker::pick_pieces(typed_bitfield<piece_index_t> const& pieces
, std::vector<piece_block>& interesting_blocks, int num_blocks
, int prefer_contiguous_blocks, torrent_peer* peer
, picker_options_t options, std::vector<piece_index_t> const& suggested_pieces
, int num_peers
, counters& pc
) const
{
TORRENT_ASSERT(peer == nullptr || peer->in_use);
picker_flags_t ret;
// prevent the number of partial pieces to grow indefinitely
// make this scale by the number of peers we have. For large
// scale clients, we would have more peers, and allow a higher
// threshold for the number of partials
// the second condition is to make sure we cap the number of partial
// _bytes_. The larger the pieces are, the fewer partial pieces we want.
// 2048 corresponds to 32 MiB
const int num_partials = int(m_downloads[piece_pos::piece_downloading].size());
if (num_partials > num_peers * 3 / 2
|| num_partials * blocks_per_piece() > 2048)
{
// if we have too many partial pieces, prioritize completing
them. In order for this to have an effect, also disable
// prefer whole pieces (otherwise partial pieces would be de-prioritized)
options |= prioritize_partials;
prefer_contiguous_blocks = 0;
ret |= picker_log_alert::partial_ratio;
}
if (prefer_contiguous_blocks) ret |= picker_log_alert::prefer_contiguous;
// only one of rarest_first and sequential can be set.
TORRENT_ASSERT(((options & rarest_first) ? 1 : 0)
+ ((options & sequential) ? 1 : 0) <= 1);
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
INVARIANT_CHECK;
#endif
TORRENT_ASSERT(num_blocks > 0);
TORRENT_ASSERT(pieces.size() == int(m_piece_map.size()));
TORRENT_ASSERT(!m_priority_boundaries.empty() || m_dirty);
// this will be filled with blocks that we should not request
// unless we can't find num_blocks among the other ones.
std::vector<piece_block> backup_blocks;
std::vector<piece_block> backup_blocks2;
std::vector<piece_index_t> ignored_pieces;
| ||
relevance 2 | ../src/piece_picker.cpp:2600 | the first_block returned here is the largest free range, not the first-fit range, which would be better |
the first_block returned here is the largest free range, not
the first-fit range, which would be better../src/piece_picker.cpp:2600 {
for (auto const& b : m_block_info)
{
TORRENT_ASSERT(b.peer == nullptr || static_cast<torrent_peer*>(b.peer)->in_use);
}
}
#endif
void piece_picker::clear_peer(torrent_peer* peer)
{
for (auto& b : m_block_info)
{
if (b.peer == peer) b.peer = nullptr;
}
}
// the first bool is true if this is the only peer that has requested and downloaded
// blocks from this piece.
// the second bool is true if this is the only active peer that is requesting
// and downloading blocks from this piece. Active means having a connection.
std::tuple<bool, bool, int, int> piece_picker::requested_from(
piece_picker::downloading_piece const& p
, int const num_blocks_in_piece, torrent_peer* peer) const
{
bool exclusive = true;
bool exclusive_active = true;
int contiguous_blocks = 0;
int max_contiguous = 0;
int first_block = 0;
int idx = -1;
for (auto const& info : blocks_for_piece(p))
{
++idx;
TORRENT_ASSERT(info.peer == nullptr || info.peer->in_use);
TORRENT_ASSERT(info.piece_index == p.index);
if (info.state == piece_picker::block_info::state_none)
{
++contiguous_blocks;
continue;
}
if (contiguous_blocks > max_contiguous)
{
max_contiguous = contiguous_blocks;
first_block = idx - contiguous_blocks;
}
contiguous_blocks = 0;
if (info.peer != peer)
{
exclusive = false;
if (info.state == piece_picker::block_info::state_requested
&& info.peer != nullptr)
| ||
relevance 2 | ../src/piece_picker.cpp:3485 | it would be nice if this could be folded into lock_piece() the main distinction is that this also maintains the m_num_passed counter and the passed_hash_check member Is there ever a case where we call write failed without also locking the piece? Perhaps write_failed() should imply locking it. |
it would be nice if this could be folded into lock_piece()
the main distinction is that this also maintains the m_num_passed
counter and the passed_hash_check member
Is there ever a case where we call write_failed() without also locking
the piece? Perhaps write_failed() should imply locking it.../src/piece_picker.cpp:3485 auto const state = m_piece_map[piece].download_queue();
if (state == piece_pos::piece_open) return;
auto const i = find_dl_piece(state, piece);
if (i == m_downloads[state].end()) return;
TORRENT_ASSERT(i->passed_hash_check == false);
if (i->passed_hash_check)
{
// it's not clear why this would happen,
// but it seems reasonable to not break the
// accounting over it.
i->passed_hash_check = false;
TORRENT_ASSERT(m_num_passed > 0);
--m_num_passed;
}
// prevent this piece from being picked until it's restored
i->locked = true;
}
void piece_picker::write_failed(piece_block const block)
{
INVARIANT_CHECK;
#if TORRENT_USE_INVARIANT_CHECKS
check_piece_state();
#endif
#ifdef TORRENT_PICKER_LOG
std::cerr << "[" << this << "] " << "write_failed( {" << block.piece_index << ", " << block.block_index << "} )" << std::endl;
#endif
auto const state = m_piece_map[block.piece_index].download_queue();
if (state == piece_pos::piece_open) return;
auto i = find_dl_piece(state, block.piece_index);
if (i == m_downloads[state].end()) return;
auto const binfo = mutable_blocks_for_piece(*i);
block_info& info = binfo[block.block_index];
TORRENT_ASSERT(&info >= &m_block_info[0]);
TORRENT_ASSERT(&info < &m_block_info[0] + m_block_info.size());
TORRENT_ASSERT(info.piece_index == block.piece_index);
TORRENT_ASSERT(info.state == block_info::state_writing);
TORRENT_ASSERT(info.num_peers == 0);
TORRENT_ASSERT(i->writing > 0);
TORRENT_ASSERT(info.state == block_info::state_writing);
if (info.state == block_info::state_finished) return;
if (info.state == block_info::state_writing) --i->writing;
| ||
relevance 2 | ../src/session_impl.cpp:599 | is there a reason not to move all of this into init()? and just post it to the io_context? |
is there a reason not to move all of this into init()? and just
post it to the io_context?../src/session_impl.cpp:599 try
#endif
{
(this->*f)(std::forward<Args>(a)...);
}
#ifndef BOOST_NO_EXCEPTIONS
catch (system_error const& e) {
alerts().emplace_alert<session_error_alert>(e.code(), e.what());
pause();
} catch (std::exception const& e) {
alerts().emplace_alert<session_error_alert>(error_code(), e.what());
pause();
} catch (...) {
alerts().emplace_alert<session_error_alert>(error_code(), "unknown error");
pause();
}
#endif
// This function is called by the creating thread, not in the message loop's
// io_context thread.
void session_impl::start_session()
{
#ifndef TORRENT_DISABLE_LOGGING
session_log("start session");
#endif
#if TORRENT_USE_SSL
error_code ec;
m_ssl_ctx.set_default_verify_paths(ec);
#ifndef TORRENT_DISABLE_LOGGING
if (ec) session_log("SSL set_default verify_paths failed: %s", ec.message().c_str());
ec.clear();
#endif
#if defined TORRENT_WINDOWS && defined TORRENT_USE_OPENSSL && !defined TORRENT_WINRT
| ||
relevance 2 | ../src/session_impl.cpp:761 | the ip filter should probably be saved here too |
the ip filter should probably be saved here too../src/session_impl.cpp:761 m_lsd_announce_timer.async_wait([this](error_code const& e) {
wrap(&session_impl::on_lsd_announce, e); } );
#ifndef TORRENT_DISABLE_LOGGING
session_log(" done starting session");
#endif
// this applies unchoke settings from m_settings
recalculate_unchoke_slots();
// apply all m_settings to this session
run_all_updates(*this);
reopen_listen_sockets(false);
#if TORRENT_USE_INVARIANT_CHECKS
check_invariant();
#endif
}
#if TORRENT_ABI_VERSION <= 2
void session_impl::save_state(entry* eptr, save_state_flags_t const flags) const
{
TORRENT_ASSERT(is_single_thread());
entry& e = *eptr;
// make it a dict
e.dict();
if (flags & session::save_settings)
{
entry::dictionary_type& sett = e["settings"].dict();
save_settings_to_dict(non_default_settings(m_settings), sett);
}
#ifndef TORRENT_DISABLE_DHT
if (flags & session::save_dht_settings)
{
e["dht"] = dht::save_dht_settings(get_dht_settings());
}
if (m_dht && (flags & session::save_dht_state))
{
e["dht state"] = dht::save_dht_state(m_dht->state());
}
#endif
#ifndef TORRENT_DISABLE_EXTENSIONS
for (auto const& ext : m_ses_extensions[plugins_all_idx])
{
ext->save_state(*eptr);
}
| ||
relevance 2 | ../src/session_impl.cpp:3880 | make a list for torrents that want to be announced on the DHT so we don't have to loop over all torrents, just to find the ones that want to announce |
make a list for torrents that want to be announced on the DHT so we
don't have to loop over all torrents, just to find the ones that want to announce../src/session_impl.cpp:3880 if (!m_dht_torrents.empty())
{
std::shared_ptr<torrent> t;
do
{
t = m_dht_torrents.front().lock();
m_dht_torrents.pop_front();
} while (!t && !m_dht_torrents.empty());
if (t)
{
t->dht_announce();
return;
}
}
if (m_torrents.empty()) return;
if (m_next_dht_torrent >= m_torrents.size())
m_next_dht_torrent = 0;
m_torrents[m_next_dht_torrent]->dht_announce();
++m_next_dht_torrent;
if (m_next_dht_torrent >= m_torrents.size())
m_next_dht_torrent = 0;
}
#endif
void session_impl::on_lsd_announce(error_code const& e)
{
COMPLETE_ASYNC("session_impl::on_lsd_announce");
m_stats_counters.inc_stats_counter(counters::on_lsd_counter);
TORRENT_ASSERT(is_single_thread());
if (e) return;
if (m_abort) return;
ADD_OUTSTANDING_ASYNC("session_impl::on_lsd_announce");
// announce on local network every 5 minutes
int const delay = std::max(m_settings.get_int(settings_pack::local_service_announce_interval)
/ std::max(int(m_torrents.size()), 1), 1);
m_lsd_announce_timer.expires_after(seconds(delay));
m_lsd_announce_timer.async_wait([this](error_code const& err) {
wrap(&session_impl::on_lsd_announce, err); });
if (m_torrents.empty()) return;
if (m_next_lsd_torrent >= m_torrents.size())
m_next_lsd_torrent = 0;
m_torrents[m_next_lsd_torrent]->lsd_announce();
++m_next_lsd_torrent;
if (m_next_lsd_torrent >= m_torrents.size())
m_next_lsd_torrent = 0;
| ||
relevance 2 | ../src/session_impl.cpp:5513 | this function should be removed and users need to deal with the more generic case of having multiple listen ports |
this function should be removed and users need to deal with the
more generic case of having multiple listen ports../src/session_impl.cpp:5513#ifndef TORRENT_DISABLE_LOGGING
if (!node_list.empty() && nodes.empty())
{
session_log("ERROR: failed to parse DHT bootstrap list: %s", node_list.c_str());
}
#endif
for (auto const& n : nodes)
add_dht_router(n);
#endif
}
void session_impl::update_count_slow()
{
error_code ec;
for (auto const& tp : m_torrents)
{
tp->on_inactivity_tick(ec);
}
}
std::uint16_t session_impl::listen_port() const
{
return listen_port(nullptr);
}
std::uint16_t session_impl::listen_port(listen_socket_t* sock) const
{
if (m_listen_sockets.empty()) return 0;
if (sock)
{
// if we're using a proxy, we won't be able to accept any TCP
// connections. Not even uTP connections via the port we know about.
// The DHT may use the implied port to make it work, but the port we
// announce here has no relevance for that.
if (sock->flags & listen_socket_t::proxy)
return 0;
if (!(sock->flags & listen_socket_t::accept_incoming))
return 0;
return std::uint16_t(sock->tcp_external_port());
}
#ifdef TORRENT_SSL_PEERS
for (auto const& s : m_listen_sockets)
{
if (!(s->flags & listen_socket_t::accept_incoming)) continue;
if (s->ssl == transport::plaintext)
return std::uint16_t(s->tcp_external_port());
}
return 0;
| ||
relevance 2 | ../src/session_impl.cpp:5553 | this function should be removed and users need to deal with the more generic case of having multiple ssl ports |
this function should be removed and users need to deal with the
more generic case of having multiple ssl ports../src/session_impl.cpp:5553 return 0;
return std::uint16_t(sock->tcp_external_port());
}
#ifdef TORRENT_SSL_PEERS
for (auto const& s : m_listen_sockets)
{
if (!(s->flags & listen_socket_t::accept_incoming)) continue;
if (s->ssl == transport::plaintext)
return std::uint16_t(s->tcp_external_port());
}
return 0;
#else
sock = m_listen_sockets.front().get();
if (!(sock->flags & listen_socket_t::accept_incoming)) return 0;
return std::uint16_t(sock->tcp_external_port());
#endif
}
std::uint16_t session_impl::ssl_listen_port() const
{
return ssl_listen_port(nullptr);
}
std::uint16_t session_impl::ssl_listen_port(listen_socket_t* sock) const
{
#ifdef TORRENT_SSL_PEERS
if (sock)
{
if (!(sock->flags & listen_socket_t::accept_incoming)) return 0;
return std::uint16_t(sock->tcp_external_port());
}
if (m_settings.get_int(settings_pack::proxy_type) != settings_pack::none
&& m_settings.get_bool(settings_pack::proxy_peer_connections))
return 0;
for (auto const& s : m_listen_sockets)
{
if (!(s->flags & listen_socket_t::accept_incoming)) continue;
if (s->ssl == transport::ssl)
return std::uint16_t(s->tcp_external_port());
}
#else
TORRENT_UNUSED(sock);
#endif
return 0;
}
int session_impl::get_listen_port(transport const ssl, aux::listen_socket_handle const& s)
| ||
relevance 2 | ../src/session_impl.cpp:6388 | this should be factored into the udp socket, so we only have the code once |
this should be factored into the udp socket, so we only have the
code once../src/session_impl.cpp:6388 return upload_rate_limit(m_local_peer_class);
}
int session_impl::local_download_rate_limit() const
{
return download_rate_limit(m_local_peer_class);
}
int session_impl::upload_rate_limit_depr() const
{
return upload_rate_limit(m_global_class);
}
int session_impl::download_rate_limit_depr() const
{
return download_rate_limit(m_global_class);
}
#endif // DEPRECATE
void session_impl::update_peer_dscp()
{
int const value = m_settings.get_int(settings_pack::peer_dscp);
for (auto const& l : m_listen_sockets)
{
if (l->sock)
{
error_code ec;
set_traffic_class(*l->sock, value, ec);
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
session_log(">>> SET_DSCP [ tcp (%s %d) value: %x e: %s ]"
, l->sock->local_endpoint().address().to_string().c_str()
, l->sock->local_endpoint().port(), value, ec.message().c_str());
}
#endif
}
if (l->udp_sock)
{
error_code ec;
set_traffic_class(l->udp_sock->sock, value, ec);
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
session_log(">>> SET_DSCP [ udp (%s %d) value: %x e: %s ]"
, l->udp_sock->sock.local_endpoint().address().to_string().c_str()
, l->udp_sock->sock.local_port()
| ||
relevance 2 | ../src/kademlia/node.cpp:684 | it would be nice to have a bias towards node-id prefixes that are missing in the bucket |
it would be nice to have a bias towards node-id prefixes that
are missing in the bucket../src/kademlia/node.cpp:684 node_entry const* ne = m_table.next_refresh();
if (ne == nullptr) return;
// this shouldn't happen
TORRENT_ASSERT(m_id != ne->id);
if (ne->id == m_id) return;
int const bucket = 159 - distance_exp(m_id, ne->id);
TORRENT_ASSERT(bucket < 160);
send_single_refresh(ne->ep(), bucket, ne->id);
}
void node::send_single_refresh(udp::endpoint const& ep, int const bucket
, node_id const& id)
{
TORRENT_ASSERT(id != m_id);
TORRENT_ASSERT(bucket >= 0);
TORRENT_ASSERT(bucket <= 159);
// generate a random node_id within the given bucket
node_id mask = generate_prefix_mask(bucket + 1);
node_id target = generate_secret_id() & ~mask;
target |= m_id & mask;
// create a dummy traversal_algorithm
auto algo = std::make_shared<traversal_algorithm>(*this, node_id());
auto o = m_rpc.allocate_observer<ping_observer>(std::move(algo), ep, id);
if (!o) return;
#if TORRENT_USE_ASSERTS
o->m_in_constructor = false;
#endif
entry e;
e["y"] = "q";
if (m_table.is_full(bucket))
{
// current bucket is full, just ping it.
e["q"] = "ping";
m_counters.inc_stats_counter(counters::dht_ping_out);
}
else
{
// use get_peers instead of find_node. We'll get nodes in the response
// either way.
e["q"] = "get_peers";
e["a"]["info_hash"] = target.to_string();
m_counters.inc_stats_counter(counters::dht_get_peers_out);
}
o->flags |= observer::flag_queried;
m_rpc.invoke(e, ep, o);
| ||
relevance 2 | ../src/kademlia/node.cpp:757 | use the non deprecated function instead of this one |
use the non deprecated function instead of this one../src/kademlia/node.cpp:757 ret.local_endpoint = make_udp(m_sock.get_local_endpoint());
m_table.status(ret.table);
for (auto const& r : m_running_requests)
{
ret.requests.emplace_back();
dht_lookup& lookup = ret.requests.back();
r->status(lookup);
}
return ret;
}
std::tuple<int, int, int> node::get_stats_counters() const
{
int nodes, replacements;
std::tie(nodes, replacements, std::ignore) = size();
return std::make_tuple(nodes, replacements, m_rpc.num_allocated_observers());
}
#if TORRENT_ABI_VERSION == 1
void node::status(session_status& s)
{
std::lock_guard<std::mutex> l(m_mutex);
m_table.status(s);
s.dht_total_allocations += m_rpc.num_allocated_observers();
for (auto& r : m_running_requests)
{
s.active_requests.emplace_back();
dht_lookup& lookup = s.active_requests.back();
r->status(lookup);
}
}
#endif
bool node::lookup_peers(sha1_hash const& info_hash, entry& reply
, bool noseed, bool scrape, address const& requester) const
{
if (m_observer)
m_observer->get_peers(info_hash);
return m_storage.get_peers(info_hash, noseed, scrape, requester, reply);
}
entry write_nodes_entry(std::vector<node_entry> const& nodes)
{
entry r;
std::back_insert_iterator<std::string> out(r.string());
for (auto const& n : nodes)
{
std::copy(n.id.begin(), n.id.end(), out);
| ||
relevance 2 | ../src/kademlia/dht_storage.cpp:87 | make this configurable in dht_settings |
make this configurable in dht_settings../src/kademlia/dht_storage.cpp:87 tcp::endpoint addr;
bool seed = 0;
};
// internal
bool operator<(peer_entry const& lhs, peer_entry const& rhs)
{
return lhs.addr.address() == rhs.addr.address()
? lhs.addr.port() < rhs.addr.port()
: lhs.addr.address() < rhs.addr.address();
}
// this is a group. It contains a set of group members
struct torrent_entry
{
// torrent name, as announced to us
std::string name;
// peers that announced over IPv4
std::vector<peer_entry> peers4;
// peers that announced over IPv6
std::vector<peer_entry> peers6;
};
// how long between re-announces before an entry is considered stale
constexpr time_duration announce_interval = minutes(30);
// an immutable DHT item (keyed by the hash of its value) held in local
// storage, along with the book-keeping used for eviction decisions
struct dht_immutable_item
{
// the actual value
std::unique_ptr<char[]> value;
// this counts the number of IPs we have seen
// announcing this item, this is used to determine
// popularity if we reach the limit of items to store
bloom_filter<128> ips;
// the last time we heard about this item
// the correct interpretation of this field
// requires a time reference
time_point last_seen;
// number of IPs in the bloom filter
int num_announcers = 0;
// size of malloced space pointed to by value
int size = 0;
};
// a mutable DHT item. In addition to the stored value it keeps the
// fields needed to validate updates (presumably per BEP 44 — signature,
// sequence number, public key and optional salt; confirm against the spec)
struct dht_mutable_item : dht_immutable_item
{
// signature covering the stored value
signature sig{};
// sequence number of the most recent store; newer stores must
// carry a higher number
sequence_number seq{};
// public key the item is keyed under
public_key key{};
// optional salt mixed into the target hash; empty if unused
std::string salt;
};
void set_value(dht_immutable_item& item, span<char const> buf)
{
int const size = int(buf.size());
| ||
relevance 2 | ../src/kademlia/routing_table.cpp:305 | use the non-deprecated function instead of this one |
use the non deprecated function instead of this one../src/kademlia/routing_table.cpp:305 static const aux::array<int, 4> size_exceptions{{{16, 8, 4, 2}}};
if (bucket < size_exceptions.end_index())
return m_bucket_size * size_exceptions[bucket];
return m_bucket_size;
}
// fills s with one dht_routing_bucket per routing table bucket,
// reporting the number of live nodes and replacement nodes in each.
// NOTE(review): the guard below returns early when the caller's vector
// is already larger than the bucket count — apparently to avoid
// shrinking a vector another node has reported into; confirm intent.
void routing_table::status(std::vector<dht_routing_bucket>& s) const
{
if (s.size() > m_buckets.size()) return;
s.clear();
for (auto const& i : m_buckets)
{
dht_routing_bucket b;
b.num_nodes = int(i.live_nodes.size());
b.num_replacements = int(i.replacements.size());
s.push_back(b);
}
}
#if TORRENT_ABI_VERSION == 1
void routing_table::status(session_status& s) const
{
int dht_nodes;
int dht_node_cache;
int ignore;
std::tie(dht_nodes, dht_node_cache, ignore) = size();
s.dht_nodes += dht_nodes;
s.dht_node_cache += dht_node_cache;
| ||
relevance 2 | ../src/kademlia/routing_table.cpp:940 | move the lowest priority nodes to the replacement bucket |
move the lowest priority nodes to the replacement bucket../src/kademlia/routing_table.cpp:940 bucket_t& rb = m_buckets[bucket_index].replacements;
// move any node whose (160 - distance_exp(m_id, id)) >= (i - m_buckets.begin())
// to the new bucket
int const new_bucket_size = bucket_limit(bucket_index + 1);
for (auto j = b.begin(); j != b.end();)
{
int const d = distance_exp(m_id, j->id);
if (d >= 159 - bucket_index)
{
++j;
continue;
}
// this entry belongs in the new bucket
new_bucket.push_back(*j);
j = b.erase(j);
}
if (int(b.size()) > bucket_size_limit)
{
for (auto i = b.begin() + bucket_size_limit
, end(b.end()); i != end; ++i)
{
rb.push_back(*i);
}
b.resize(bucket_size_limit);
}
// split the replacement bucket as well. If the live bucket
// is not full anymore, also move the replacement entries
// into the main bucket
for (auto j = rb.begin(); j != rb.end();)
{
if (distance_exp(m_id, j->id) >= 159 - bucket_index)
{
if (!j->pinged() || int(b.size()) >= bucket_size_limit)
{
++j;
continue;
}
b.push_back(*j);
}
else
{
// this entry belongs in the new bucket
if (j->pinged() && int(new_bucket.size()) < new_bucket_size)
new_bucket.push_back(*j);
else
new_replacement_bucket.push_back(*j);
}
| ||
relevance 2 | ../include/libtorrent/piece_picker.hpp:647 | having 8 priority levels is probably excessive. It should probably be changed to 3 levels + dont-download |
having 8 priority levels is probably excessive. It should
probably be changed to 3 levels + dont-download../include/libtorrent/piece_picker.hpp:647 else if (state() == piece_full)
state(piece_full_reverse);
}
// the number of peers that has this piece
// (availability)
std::uint32_t peer_count : 26;
// one of the download_queue_t values. This indicates whether this piece
// is currently being downloaded or not, and what state it's in if
// it is. Specifically, as an optimization, pieces that have all blocks
// requested from them are separated out into separate lists to make
// lookups quicker. The main oddity is that whether a downloading piece
// has only been requested from peers that are reverse, that's
// recorded as piece_downloading_reverse, which really means the same
// as piece_downloading, it just saves space to also indicate that it
// has a bit lower priority. The reverse bit is only relevant if the
// state is piece_downloading.
std::uint32_t download_state : 3;
// is 0 if the piece is filtered (not to be downloaded)
// 1 is low priority
// 2 is low priority
// 3 is mid priority
// 4 is default priority
// 5 is mid priority
// 6 is high priority
// 7 is high priority
std::uint32_t piece_priority : 3;
// index in to the piece_info vector
prio_index_t index;
#ifdef TORRENT_DEBUG_REFCOUNTS
// all the peers that have this piece
std::set<const torrent_peer*> have_peers;
#endif
// index is set to this to indicate that we have the
// piece. There is no entry for the piece in the
// buckets if this is the case.
static constexpr prio_index_t we_have_index{-1};
// the priority value that means the piece is filtered
static constexpr std::uint32_t filter_priority = 0;
// the max number the peer count can hold
static constexpr std::uint32_t max_peer_count = 0xffff;
bool have() const { return index == we_have_index; }
| ||
relevance 2 | ../include/libtorrent/proxy_base.hpp:298 | use the resolver interface that has a built-in cache |
use the resolver interface that has a built-in cache../include/libtorrent/proxy_base.hpp:298protected:
// The handler must be taken as lvalue reference here since we may not call
// it. But if we do, we want the call operator to own the function object.
// Returns false (doing nothing) when there is no error; otherwise invokes
// the handler with the error, closes the socket (ignoring any close error)
// and returns true so the caller can abort the operation.
template <typename Handler>
bool handle_error(error_code const& e, Handler&& h)
{
if (!e) return false;
std::forward<Handler>(h)(e);
error_code ec;
close(ec);
return true;
}
aux::noexcept_movable<tcp::socket> m_sock;
std::string m_hostname; // proxy host
int m_port; // proxy port
aux::noexcept_movable<endpoint_type> m_remote_endpoint;
aux::noexcept_move_only<tcp::resolver> m_resolver;
};
template <typename Handler, typename UnderlyingHandler>
struct wrap_allocator_t
{
wrap_allocator_t(Handler h, UnderlyingHandler uh)
: m_handler(std::move(h))
, m_underlying_handler(std::move(uh))
{}
wrap_allocator_t(wrap_allocator_t const&) = default;
wrap_allocator_t(wrap_allocator_t&&) = default;
template <class... A>
void operator()(A&&... a)
{
m_handler(std::forward<A>(a)..., std::move(m_underlying_handler));
}
using allocator_type = typename boost::asio::associated_allocator<UnderlyingHandler>::type;
using executor_type = typename boost::asio::associated_executor<UnderlyingHandler>::type;
allocator_type get_allocator() const noexcept
{ return boost::asio::get_associated_allocator(m_underlying_handler); }
executor_type get_executor() const noexcept
{
return boost::asio::get_associated_executor(m_underlying_handler);
}
| ||
relevance 2 | ../include/libtorrent/peer_connection.hpp:996 | this should really be a circular buffer |
this should really be a circular buffer../include/libtorrent/peer_connection.hpp:996 // it just serves as a queue to remember what we've sent, to avoid
// re-sending suggests for the same piece
// i.e. outgoing suggest pieces
aux::vector<piece_index_t> m_suggest_pieces;
// the pieces we will send to the peer
// if requested (regardless of choke state)
std::vector<piece_index_t> m_accept_fast;
// a sent-piece counter for the allowed fast set
// to avoid exploitation. Each slot is a counter
// for one of the pieces from the allowed-fast set
aux::vector<std::uint16_t> m_accept_fast_piece_cnt;
// the pieces the peer will send us if
// requested (regardless of choke state)
std::vector<piece_index_t> m_allowed_fast;
// pieces that has been suggested to be downloaded from this peer
// i.e. incoming suggestions
aux::vector<piece_index_t> m_suggested_pieces;
// the time when this peer last saw a complete copy
// of this torrent
time_t m_last_seen_complete = 0;
// the block we're currently receiving. Or
// (-1, -1) if we're not receiving one
piece_block m_receiving_block = piece_block::invalid;
// the local endpoint for this peer, i.e. our address
// and our port. If this is set for outgoing connections
// before the connection completes, it means we want to
// force the connection to be bound to the specified interface.
// if it ends up being bound to a different local IP, the connection
// is closed.
tcp::endpoint m_local;
// remote peer's id
peer_id m_peer_id;
protected:
template <typename Fun, typename... Args>
void wrap(Fun f, Args&&... a);
// statistics about upload and download speeds
// and total amount of uploads and downloads for
// this peer
| ||
relevance 2 | ../include/libtorrent/peer_connection.hpp:1086 | rename this target queue size |
rename this target queue size../include/libtorrent/peer_connection.hpp:1086 // this is the piece that is available to this peer. Only
// these two pieces can be downloaded from us by this peer.
// This will remain the current piece for this peer until
// another peer sends us a have message for this piece
std::array<piece_index_t, 2> m_superseed_piece = {{piece_index_t(-1), piece_index_t(-1)}};
#endif
// the number of bytes send to the disk-io
// thread that hasn't yet been completely written.
int m_outstanding_writing_bytes = 0;
// max transfer rates seen on this peer
int m_download_rate_peak = 0;
int m_upload_rate_peak = 0;
// stop sending data after this many bytes, INT_MAX = inf
int m_send_barrier = INT_MAX;
// the number of request we should queue up
// at the remote end.
std::uint16_t m_desired_queue_size = 4;
// if set to non-zero, this peer will always prefer
// to request entire n pieces, rather than blocks.
// where n is the value of this variable.
// if it is 0, the download rate limit setting
// will be used to determine if whole pieces
// are preferred.
std::uint16_t m_prefer_contiguous_blocks = 0;
// this is the number of times this peer has had
// a request rejected because of a disk I/O failure.
// once this reaches a certain threshold, the
// peer is disconnected in order to avoid infinite
// loops of consistent failures
std::uint8_t m_disk_read_failures = 0;
// this is used in seed mode whenever we trigger a hash check
// for a piece, before we read it. It's used to throttle
// the hash checks to just a few per peer at a time.
std::uint8_t m_outstanding_piece_verification:3;
// is true if it was we that connected to the peer
// and false if we got an incoming connection
// could be considered: true = local, false = remote
bool m_outgoing:1;
// is true if we learn the incoming connections listening
// during the extended handshake
bool m_received_listen_port:1;
| ||
relevance 2 | ../include/libtorrent/enum_net.hpp:196 | this could be done more efficiently by just looking up the interface with the given name, maybe even with if_nametoindex() |
this could be done more efficiently by just looking up
the interface with the given name, maybe even with if_nametoindex()../include/libtorrent/enum_net.hpp:196 // IPv6 address. If we're asking to be bound to an IPv6 address and
// providing 0.0.0.0 as the device, turn it into "::"
if (ip == address_v4::any() && protocol == boost::asio::ip::tcp::v6())
ip = address_v6::any();
bind_ep.address(ip);
// it appears to be an IP. Just bind to that address
sock.bind(bind_ep, ec);
return bind_ep.address();
}
ec.clear();
#if TORRENT_HAS_BINDTODEVICE
// try to use SO_BINDTODEVICE here, if that exists. If it fails,
// fall back to the mechanism we have below
aux::bind_device(sock, device_name, ec);
if (ec)
#endif
{
ec.clear();
std::vector<ip_interface> ifs = enum_net_interfaces(ios, ec);
if (ec) return bind_ep.address();
bool found = false;
for (auto const& iface : ifs)
{
// we're looking for a specific interface, and its address
// (which must be of the same family as the address we're
// connecting to)
if (std::strcmp(iface.name, device_name) != 0) continue;
if (iface.interface_address.is_v4() != (protocol == boost::asio::ip::tcp::v4()))
continue;
bind_ep.address(iface.interface_address);
found = true;
break;
}
if (!found)
{
ec = error_code(boost::system::errc::no_such_device, generic_category());
return bind_ep.address();
}
}
sock.bind(bind_ep, ec);
return bind_ep.address();
}
// returns the device name whose local address is ``addr``. If
// no such device is found, an empty string is returned.
| ||
relevance 2 | ../include/libtorrent/socks5_stream.hpp:153 | add async_connect() that takes a hostname and port as well |
add async_connect() that takes a hostname and port as well../include/libtorrent/socks5_stream.hpp:153 TORRENT_ASSERT(!aux::is_ip_address(host));
m_dst_name = host;
if (m_dst_name.size() > 255)
m_dst_name.resize(255);
}
// closes the proxied connection; clears the stored destination hostname
// so a stale name cannot leak into a subsequent connect on this stream
void close(error_code& ec)
{
m_dst_name.clear();
proxy_base::close(ec);
}
#ifndef BOOST_NO_EXCEPTIONS
// throwing overload, only available when exceptions are enabled
void close()
{
m_dst_name.clear();
proxy_base::close();
}
#endif
// initiates the SOCKS handshake to `endpoint` through the configured
// proxy. The completion handler is invoked once the full sequence below
// has finished (or failed). This call only starts step 1; the remaining
// steps are chained through name_lookup() and its continuations.
template <class Handler>
void async_connect(endpoint_type const& endpoint, Handler handler)
{
// make sure we don't try to connect to INADDR_ANY. binding is fine,
// and using a hostname is fine on SOCKS version 5.
TORRENT_ASSERT(endpoint.address() != address()
|| (!m_dst_name.empty() && m_version == 5));
m_remote_endpoint = endpoint;
// the connect is split up in the following steps:
// 1. resolve name of proxy server
// 2. connect to proxy server
// 3. if version == 5:
// 3.1 send SOCKS5 authentication method message
// 3.2 read SOCKS5 authentication response
// 3.3 send username+password
// 4. send SOCKS command message
ADD_OUTSTANDING_ASYNC("socks5_stream::name_lookup");
m_resolver.async_resolve(m_hostname, to_string(m_port).data(), wrap_allocator(
[this](error_code const& ec, tcp::resolver::results_type ips, Handler hn) {
name_lookup(ec, std::move(ips), std::move(hn));
}, std::move(handler)));
}
private:
template <typename Handler>
void name_lookup(error_code const& e, tcp::resolver::results_type ips
, Handler h)
| ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:127 | make this interface a lot smaller. It could be split up into several smaller interfaces. Each subsystem could then limit the size of the mock object to test it. |
make this interface a lot smaller. It could be split up into
several smaller interfaces. Each subsystem could then limit the size
of the mock object to test it.../include/libtorrent/aux_/session_interface.hpp:127 // a release build with logging disabled (which is the default) will
// not have this class at all
struct TORRENT_EXTRA_EXPORT session_logger
{
#ifndef TORRENT_DISABLE_LOGGING
virtual bool should_log() const = 0;
virtual void session_log(char const* fmt, ...) const TORRENT_FORMAT(2,3) = 0;
#endif
#if TORRENT_USE_ASSERTS
virtual bool is_single_thread() const = 0;
virtual bool has_peer(peer_connection const* p) const = 0;
virtual bool any_torrent_has_peer(peer_connection const* p) const = 0;
virtual bool is_posting_torrent_updates() const = 0;
#endif
protected:
~session_logger() {}
};
#endif // TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
struct TORRENT_EXTRA_EXPORT session_interface
#if !defined TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
: session_logger
#endif
{
| ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:136 | the IP voting mechanism should be factored out to its own class, not part of the session and these constants should move too |
the IP voting mechanism should be factored out
to its own class, not part of the session
and these constants should move too../include/libtorrent/aux_/session_interface.hpp:136 virtual void session_log(char const* fmt, ...) const TORRENT_FORMAT(2,3) = 0;
#endif
#if TORRENT_USE_ASSERTS
virtual bool is_single_thread() const = 0;
virtual bool has_peer(peer_connection const* p) const = 0;
virtual bool any_torrent_has_peer(peer_connection const* p) const = 0;
virtual bool is_posting_torrent_updates() const = 0;
#endif
protected:
~session_logger() {}
};
#endif // TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
struct TORRENT_EXTRA_EXPORT session_interface
#if !defined TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
: session_logger
#endif
{
// the logic in ip_voter relies on more reliable sources are represented
// by more significant bits
static constexpr ip_source_t source_dht = 1_bit;
static constexpr ip_source_t source_peer = 2_bit;
static constexpr ip_source_t source_tracker = 3_bit;
static constexpr ip_source_t source_router = 4_bit;
virtual void set_external_address(tcp::endpoint const& local_endpoint
, address const& ip
, ip_source_t source_type, address const& source) = 0;
virtual external_ip external_address() const = 0;
virtual disk_interface& disk_thread() = 0;
virtual alert_manager& alerts() = 0;
virtual torrent_peer_allocator_interface& get_peer_allocator() = 0;
virtual io_context& get_context() = 0;
virtual aux::resolver_interface& get_resolver() = 0;
virtual bool has_connection(peer_connection* p) const = 0;
virtual void insert_peer(std::shared_ptr<peer_connection> const& c) = 0;
virtual void remove_torrent(torrent_handle const& h, remove_flags_t options = {}) = 0;
virtual void remove_torrent_impl(std::shared_ptr<torrent> tptr, remove_flags_t options) = 0;
// port filter
virtual port_filter const& get_port_filter() const = 0;
virtual void ban_ip(address addr) = 0;
| ||
relevance 2 | ../include/libtorrent/aux_/chained_buffer.hpp:60 | this type should probably be renamed to send_buffer |
this type should probably be renamed to send_buffer../include/libtorrent/aux_/chained_buffer.hpp:60#include "libtorrent/aux_/buffer.hpp"
#include <deque>
#include <vector>
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <boost/asio/buffer.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#ifdef _MSC_VER
// visual studio requires the value in a deque to be copyable. C++11
// has looser requirements depending on which functions are actually used.
#define TORRENT_CPP98_DEQUE 1
#else
#define TORRENT_CPP98_DEQUE 0
#endif
namespace libtorrent {
namespace aux {
struct TORRENT_EXTRA_EXPORT chained_buffer : private single_threaded
{
chained_buffer(): m_bytes(0), m_capacity(0)
{
thread_started();
#if TORRENT_USE_ASSERTS
m_destructed = false;
#endif
}
private:
// destructs/frees the holder object
using destruct_holder_fun = void (*)(void*);
using move_construct_holder_fun = void (*)(void*, void*);
struct buffer_t
{
buffer_t() {}
#if TORRENT_CPP98_DEQUE
buffer_t(buffer_t&& rhs) noexcept
{
destruct_holder = rhs.destruct_holder;
move_holder = rhs.move_holder;
buf = rhs.buf;
size = rhs.size;
used_size = rhs.used_size;
move_holder(&holder, &rhs.holder);
}
buffer_t& operator=(buffer_t&& rhs) & noexcept
{
| ||
relevance 1 | ../src/torrent.cpp:1125 | make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file |
make this depend on the error and on the filesystem the
files are being downloaded to. If the error is no_space_left_on_device
and the filesystem doesn't support sparse files, only zero the priorities
of the pieces that are at the tails of all files, leaving everything
up to the highest written piece in each file../src/torrent.cpp:1125
// notify the user of the error
if (alerts().should_post<file_error_alert>())
alerts().emplace_alert<file_error_alert>(error.ec
, resolve_filename(error.file()), error.operation, get_handle());
// if a write operation failed, and future writes are likely to
// fail, while reads may succeed, just set the torrent to upload mode
// if we make an incorrect assumption here, it's not the end of the
// world, if we ever issue a read request and it fails as well, we
// won't get in here and we'll actually end up pausing the torrent
if (rw == disk_class::write
&& (error.ec == boost::system::errc::read_only_file_system
|| error.ec == boost::system::errc::permission_denied
|| error.ec == boost::system::errc::operation_not_permitted
|| error.ec == boost::system::errc::no_space_on_device
|| error.ec == boost::system::errc::file_too_large))
{
// if we failed to write, stop downloading and just
// keep seeding.
set_upload_mode(true);
return;
}
// put the torrent in an error-state
set_error(error.ec, error.file());
// if the error appears to be more serious than a full disk, just pause the torrent
pause();
}
void torrent::handle_inconsistent_hashes(piece_index_t const piece)
{
auto const file_slices = torrent_file().map_block(piece, 0, 0);
file_index_t const file = file_slices.empty() ? torrent_status::error_file_none : file_slices[0].file_index;
set_error(errors::torrent_inconsistent_hashes, file);
// if this is a hybrid torrent, we may have marked some more pieces
// as "have" but not yet validated them against the v2 hashes. At
// this point, just assume we have no pieces
m_picker.reset();
m_hash_picker.reset();
m_file_progress.clear();
m_have_all = false;
update_gauge();
pause();
}
void torrent::on_piece_fail_sync(piece_index_t const piece, piece_block) try
{
if (m_abort) return;
| ||
relevance 1 | ../src/torrent.cpp:8371 | should disconnect all peers that have the pieces we have, not just seeds. It would be pretty expensive to check all pieces for all peers though |
should disconnect all peers that have the pieces we have
not just seeds. It would be pretty expensive to check all pieces
for all peers though../src/torrent.cpp:8371
set_state(torrent_status::finished);
set_queue_position(no_pos);
m_became_finished = aux::time_now32();
// we have to call completed() before we start
// disconnecting peers, since there's an assert
// to make sure we're cleared the piece picker
if (is_seed()) completed();
send_upload_only();
state_updated();
if (m_completed_time == 0)
m_completed_time = time(nullptr);
// disconnect all seeds
if (settings().get_bool(settings_pack::close_redundant_connections))
{
std::vector<peer_connection*> seeds;
for (auto const p : m_connections)
{
TORRENT_INCREMENT(m_iterating_connections);
TORRENT_ASSERT(p->associated_torrent().lock().get() == this);
if (p->upload_only() && p->can_disconnect(errors::torrent_finished))
{
#ifndef TORRENT_DISABLE_LOGGING
p->peer_log(peer_log_alert::info, "SEED", "CLOSING CONNECTION");
#endif
seeds.push_back(p);
}
}
for (auto& p : seeds)
p->disconnect(errors::torrent_finished, operation_t::bittorrent
, peer_connection_interface::normal);
}
if (m_abort) return;
update_want_peers();
if (m_storage)
{
// we need to keep the object alive during this operation
m_ses.disk_thread().async_release_files(m_storage
, std::bind(&torrent::on_cache_flushed, shared_from_this(), false));
m_ses.deferred_submit_jobs();
}
// this torrent just completed downloads, which means it will fall
| ||
relevance 1 | ../src/session_impl.cpp:5705 | report the proper address of the router as the source IP of this vote of our external address, instead of the empty address |
report the proper address of the router as the source IP of
this vote of our external address, instead of the empty address../src/session_impl.cpp:5705 , listen_socket_handle const& ls)
{
TORRENT_ASSERT(is_single_thread());
listen_socket_t* listen_socket = ls.get();
// NOTE: don't assume that if ec != 0, the rest of the logic
// is not necessary, the ports still need to be set, in other
// words, don't early return without careful review of the
// remaining logic
if (ec && m_alerts.should_post<portmap_error_alert>())
{
m_alerts.emplace_alert<portmap_error_alert>(mapping
, transport, ec, listen_socket ? listen_socket->local_endpoint.address() : address());
}
if (!listen_socket) return;
if (!ec && !external_ip.is_unspecified())
{
listen_socket->external_address.cast_vote(external_ip, source_router, address());
}
// need to check whether this mapping is for one of session ports (it could also be a user mapping)
if ((proto == portmap_protocol::tcp) && (listen_socket->tcp_port_mapping[transport].mapping == mapping))
listen_socket->tcp_port_mapping[transport].port = port;
else if ((proto == portmap_protocol::udp) && (listen_socket->udp_port_mapping[transport].mapping == mapping))
listen_socket->udp_port_mapping[transport].port = port;
if (!ec && m_alerts.should_post<portmap_alert>())
{
m_alerts.emplace_alert<portmap_alert>(mapping, port
, transport, proto, listen_socket->local_endpoint.address());
}
}
#if TORRENT_ABI_VERSION == 1
session_status session_impl::status() const
{
// INVARIANT_CHECK;
TORRENT_ASSERT(is_single_thread());
session_status s;
s.optimistic_unchoke_counter = m_optimistic_unchoke_time_scaler;
s.unchoke_counter = m_unchoke_time_scaler;
s.num_dead_peers = int(m_undead_peers.size());
s.num_peers = int(m_stats_counters[counters::num_peers_connected]);
s.num_unchoked = int(m_stats_counters[counters::num_peers_up_unchoked_all]);
s.allowed_upload_slots = int(m_stats_counters[counters::num_unchoke_slots]);
| ||
relevance 1 | ../include/libtorrent/ip_voter.hpp:130 | have one instance per possible subnet, 192.168.x.x, 10.x.x.x, etc. |
have one instance per possible subnet, 192.168.x.x, 10.x.x.x, etc.../include/libtorrent/ip_voter.hpp:130 // stores one address for each combination of local/global and ipv4/ipv6
// use of this class should be avoided, get the IP from the appropriate
// listen interface wherever possible
struct TORRENT_EXTRA_EXPORT external_ip
{
// default-constructs with unspecified addresses in all four slots
external_ip()
: m_addresses{{address_v4(), address_v6()}, {address_v4(), address_v6()}}
{}
// construct with explicit local/global addresses for each family
external_ip(address const& local4, address const& global4
, address const& local6, address const& global6);
// the external IP as it would be observed from `ip`
address external_address(address const& ip) const;
private:
// support one local and one global address per address family
// [0][n] = global [1][n] = local
// [n][0] = IPv4 [n][1] = IPv6
address m_addresses[2][2];
};
}
#endif
| ||
relevance 0 | ../test/test_dht.cpp:472 | check to make sure the "best" items are stored |
check to make sure the "best" items are stored../test/test_dht.cpp:472 , msg_args().target(items[j].target));
key_desc_t const desc[] =
{
{ "r", bdecode_node::dict_t, 0, key_desc_t::parse_children },
{ "v", bdecode_node::dict_t, 0, 0},
{ "id", bdecode_node::string_t, 20, key_desc_t::last_child},
{ "y", bdecode_node::string_t, 1, 0},
};
bdecode_node parsed[4];
char error_string[200];
int ret = verify_message(response, desc, parsed, error_string);
if (ret)
{
items_num.insert(items_num.begin(), j);
}
}
TEST_EQUAL(items_num.size(), 4);
}
// accumulator: adds the XOR log-distance between node e and ref to the
// running sum s — shaped for use with std::accumulate over node entries
int sum_distance_exp(int s, node_entry const& e, node_id const& ref)
{
return s + distance_exp(e.id, ref);
}
// collects every peer endpoint reported across get_peers callbacks,
// so tests can inspect the aggregate afterwards
std::vector<tcp::endpoint> g_got_peers;
void get_peers_cb(std::vector<tcp::endpoint> const& peers)
{
g_got_peers.insert(g_got_peers.end(), peers.begin(), peers.end());
}
// items received via get_mutable_item/put callbacks, inspected by tests
std::vector<dht::item> g_got_items;
// the item handed to the put callback, and how many times it was invoked
// (namespace-scope statics are zero-initialized, so g_put_count starts at 0)
dht::item g_put_item;
int g_put_count;
// records non-empty items; `a` presumably flags an authoritative
// (final) result — non-authoritative callbacks are ignored. TODO confirm.
void get_mutable_item_cb(dht::item const& i, bool a)
{
if (!a) return;
if (!i.empty())
g_got_items.push_back(i);
}
void put_mutable_item_data_cb(dht::item& i)
{
if (!i.empty())
g_got_items.push_back(i);
| ||
relevance 0 | ../test/test_dht.cpp:3223 | this won't work because the second node isn't pinged so it wont be added to the routing table |
this won't work because the second node isn't pinged so it wont
be added to the routing table../test/test_dht.cpp:3223 bool ret = verify_message(request, get_item_desc_ro, parsed, error_string);
TEST_CHECK(ret);
TEST_EQUAL(parsed[3].int_value(), 1);
// should have one node now, which is 4.4.4.4:1234
TEST_EQUAL(std::get<0>(node.size()), 1);
// and no replacement nodes
TEST_EQUAL(std::get<1>(node.size()), 0);
// now, disable read_only, try again.
g_sent_packets.clear();
sett.set_bool(settings_pack::dht_read_only, false);
send_dht_request(node, "get", source, &response);
// sender should be added to replacement bucket
TEST_EQUAL(std::get<1>(node.size()), 1);
g_sent_packets.clear();
#if 0
target = generate_next();
node.get_item(target, get_immutable_item_cb);
// since we have 2 nodes, we should have two packets.
TEST_EQUAL(g_sent_packets.size(), 2);
// both of them shouldn't have a 'ro' key.
node_from_entry(g_sent_packets.front().second, request);
ret = verify_message(request, get_item_desc_ro, parsed, error_string);
TEST_CHECK(ret);
TEST_CHECK(!parsed[3]);
node_from_entry(g_sent_packets.back().second, request);
ret = verify_message(request, get_item_desc_ro, parsed, error_string);
TEST_CHECK(ret);
TEST_CHECK(!parsed[3]);
#endif
}
#ifndef TORRENT_DISABLE_LOGGING
// these tests rely on logging being enabled
TORRENT_TEST(invalid_error_msg)
{
| ||
relevance 0 | ../test/test_dht.cpp:4081 | test obfuscated_get_peers |
test obfuscated_get_peers../test/test_dht.cpp:4081 TEST_CHECK(sm.has_quota());
});
}
TORRENT_TEST(rate_limit_accrue_limit)
{
aux::session_settings sett;
sett.set_int(settings_pack::dht_upload_rate_limit, std::numeric_limits<int>::max());
test_rate_limit(sett, [](lt::dht::socket_manager& sm) {
TEST_CHECK(sm.has_quota());
for (int i = 0; i < 10; ++i)
{
std::this_thread::sleep_for(milliseconds(500));
TEST_CHECK(sm.has_quota());
}
});
}
#else
TORRENT_TEST(dht)
{
// dummy dht test
TEST_CHECK(true);
}
#endif
| ||
relevance 0 | ../test/test_resume.cpp:582 | test what happens when loading a resume file with both piece priorities and file priorities (file prio should take precedence) |
test what happens when loading a resume file with both piece priorities
and file priorities (file prio should take precedence)../test/test_resume.cpp:582 {
TEST_EQUAL(pieces[i], true);
}
}
}
} // anonymous namespace
// piece_slots resume behavior with default settings
TORRENT_TEST(piece_slots_seed)
{
test_piece_slots_seed(settings());
}
// same scenario, but with the suggest-read-cache mode enabled, which
// exercises a different code path in the session
TORRENT_TEST(piece_slots_seed_suggest_cache)
{
settings_pack sett = settings();
sett.set_int(settings_pack::suggest_mode, settings_pack::suggest_read_cache);
test_piece_slots_seed(sett);
}
| ||
relevance 0 | ../test/test_resume.cpp:585 | make sure a resume file only ever contains file priorities OR piece priorities. Never both. |
make sure a resume file only ever contains file priorities OR piece
priorities. Never both.../test/test_resume.cpp:585 TEST_EQUAL(pieces[i], true);
}
}
}
} // anonymous namespace
TORRENT_TEST(piece_slots_seed)
{
test_piece_slots_seed(settings());
}
TORRENT_TEST(piece_slots_seed_suggest_cache)
{
settings_pack sett = settings();
sett.set_int(settings_pack::suggest_mode, settings_pack::suggest_read_cache);
test_piece_slots_seed(sett);
}
| ||
relevance 0 | ../test/test_resume.cpp:588 | generally save |
generally save../test/test_resume.cpp:588 }
}
}
} // anonymous namespace
TORRENT_TEST(piece_slots_seed)
{
test_piece_slots_seed(settings());
}
TORRENT_TEST(piece_slots_seed_suggest_cache)
{
settings_pack sett = settings();
sett.set_int(settings_pack::suggest_mode, settings_pack::suggest_read_cache);
test_piece_slots_seed(sett);
}
#if TORRENT_ABI_VERSION == 1
// with no file priorities in either the resume data or the
// add_torrent_params, all three files end up at the default priority (4)
TORRENT_TEST(file_priorities_default_deprecated)
{
lt::session ses(settings());
std::vector<download_priority_t> file_priorities = test_resume_flags(ses
, {}, "", "", true).get_file_priorities();
TEST_EQUAL(file_priorities.size(), 3);
TEST_EQUAL(file_priorities[0], 4);
TEST_EQUAL(file_priorities[1], 4);
TEST_EQUAL(file_priorities[2], 4);
}
// As long as the add_torrent_params priorities are empty, the file_priorities
// from the resume data should take effect
TORRENT_TEST(file_priorities_in_resume_deprecated)
{
lt::session ses(settings());
std::vector<download_priority_t> file_priorities = test_resume_flags(ses, {}, "", "123").get_file_priorities();
TEST_EQUAL(file_priorities.size(), 3);
TEST_EQUAL(file_priorities[0], 1);
TEST_EQUAL(file_priorities[1], 2);
TEST_EQUAL(file_priorities[2], 3);
}
// if both resume data and add_torrent_params has file_priorities, the
// add_torrent_params one take precedence
TORRENT_TEST(file_priorities_in_resume_and_params_deprecated)
{
| ||
relevance 0 | ../test/test_resume.cpp:911 | test all other resume flags here too. This would require returning more than just the torrent_status from test_resume_flags. Also http seeds and trackers for instance |
test all other resume flags here too. This would require returning
more than just the torrent_status from test_resume_flags. Also http seeds
and trackers for instance../test/test_resume.cpp:911 TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
}
TORRENT_TEST(paused_deprecated)
{
lt::session ses(settings());
// resume data overrides the paused flag
auto const now = lt::clock_type::now();
torrent_status s = test_resume_flags(ses, torrent_flags::paused, "", "", true).status();
default_tests(s, now);
#ifdef TORRENT_WINDOWS
TEST_EQUAL(s.save_path, "c:\\add_torrent_params save_path");
#else
TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
#endif
TEST_EQUAL(s.flags & flags_mask, torrent_flags_t{});
TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
}
TORRENT_TEST(url_seed_resume_data_deprecated)
{
// merge url seeds with resume data
std::printf("flags: merge_resume_http_seeds\n");
lt::session ses(settings());
torrent_handle h = test_resume_flags(ses,
torrent_flags::merge_resume_http_seeds, "", "", true);
std::set<std::string> us = h.url_seeds();
std::set<std::string> ws = h.http_seeds();
TEST_EQUAL(us.size(), 3);
TEST_EQUAL(std::count(us.begin(), us.end()
, "http://add_torrent_params_url_seed.com/"), 1);
TEST_EQUAL(std::count(us.begin(), us.end()
, "http://torrent_file_url_seed.com/"), 1);
TEST_EQUAL(std::count(us.begin(), us.end()
, "http://resume_data_url_seed.com/"), 1);
TEST_EQUAL(ws.size(), 1);
TEST_EQUAL(std::count(ws.begin(), ws.end()
, "http://resume_data_http_seed.com"), 1);
}
TORRENT_TEST(resume_override_torrent_deprecated)
{
// resume data overrides the .torrent_file
std::printf("flags: no merge_resume_http_seed\n");
lt::session ses(settings());
torrent_handle h = test_resume_flags(ses,
| ||
relevance 0 | ../test/test_resume.cpp:1683 | test all other resume flags here too. This would require returning more than just the torrent_status from test_resume_flags. Also http seeds and trackers for instance |
test all other resume flags here too. This would require returning
more than just the torrent_status from test_resume_flags. Also http seeds
and trackers for instance../test/test_resume.cpp:1683 TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
}
TORRENT_TEST(paused)
{
lt::session ses(settings());
// resume data overrides the paused flag
auto const now = lt::clock_type::now();
torrent_status s = test_resume_flags(ses, torrent_flags::paused).status();
default_tests(s, now);
#ifdef TORRENT_WINDOWS
TEST_EQUAL(s.save_path, "c:\\add_torrent_params save_path");
#else
TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
#endif
TEST_EQUAL(s.flags & flags_mask, torrent_flags::paused);
TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
}
TORRENT_TEST(no_metadata)
{
lt::session ses(settings());
add_torrent_params p;
p.info_hashes.v1 = sha1_hash("abababababababababab");
p.save_path = ".";
p.name = "foobar";
torrent_handle h = ses.add_torrent(p);
h.save_resume_data(torrent_handle::save_info_dict);
alert const* a = wait_for_alert(ses, save_resume_data_alert::alert_type);
TEST_CHECK(a);
save_resume_data_alert const* ra = alert_cast<save_resume_data_alert>(a);
TEST_CHECK(ra);
if (ra)
{
auto const& atp = ra->params;
TEST_EQUAL(atp.info_hashes, p.info_hashes);
TEST_EQUAL(atp.name, "foobar");
}
}
template <typename Fun>
void test_unfinished_pieces(Fun f)
{
// create a torrent and complete files
std::shared_ptr<torrent_info> ti = generate_torrent(true, true);
add_torrent_params p;
| ||
relevance 0 | ../test/test_torrent_info.cpp:459 | test remap_files |
test remap_files../test/test_torrent_info.cpp:459 | ||
relevance 0 | ../test/test_torrent_info.cpp:460 | torrent with 'p' (padfile) attribute |
torrent with 'p' (padfile) attribute../test/test_torrent_info.cpp:460 | ||
relevance 0 | ../test/test_torrent_info.cpp:461 | torrent with 'h' (hidden) attribute |
torrent with 'h' (hidden) attribute../test/test_torrent_info.cpp:461 | ||
relevance 0 | ../test/test_torrent_info.cpp:462 | torrent with 'x' (executable) attribute |
torrent with 'x' (executable) attribute../test/test_torrent_info.cpp:462 | ||
relevance 0 | ../test/test_torrent_info.cpp:463 | torrent with 'l' (symlink) attribute |
torrent with 'l' (symlink) attribute../test/test_torrent_info.cpp:463 | ||
relevance 0 | ../test/test_torrent_info.cpp:464 | torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once) |
torrent with multiple trackers in multiple tiers, making sure we
shuffle them (how do you test shuffling?, load it multiple times and make
sure it's in different order at least once)../test/test_torrent_info.cpp:464 | ||
relevance 0 | ../test/test_torrent_info.cpp:467 | torrents with a zero-length name |
torrents with a zero-length name../test/test_torrent_info.cpp:467 | ||
relevance 0 | ../test/test_torrent_info.cpp:468 | torrent with a non-dictionary info-section |
torrent with a non-dictionary info-section../test/test_torrent_info.cpp:468 | ||
relevance 0 | ../test/test_torrent_info.cpp:469 | torrents with DHT nodes |
torrents with DHT nodes../test/test_torrent_info.cpp:469 | ||
relevance 0 | ../test/test_torrent_info.cpp:470 | torrent with url-list as a single string |
torrent with url-list as a single string../test/test_torrent_info.cpp:470 | ||
relevance 0 | ../test/test_torrent_info.cpp:471 | torrent with http seed as a single string |
torrent with http seed as a single string../test/test_torrent_info.cpp:471 | ||
relevance 0 | ../test/test_torrent_info.cpp:472 | torrent with a comment |
torrent with a comment../test/test_torrent_info.cpp:472 | ||
relevance 0 | ../test/test_torrent_info.cpp:473 | torrent with an SSL cert |
torrent with an SSL cert../test/test_torrent_info.cpp:473 | ||
relevance 0 | ../test/test_torrent_info.cpp:474 | torrent with attributes (executable and hidden) |
torrent with attributes (executable and hidden)../test/test_torrent_info.cpp:474 | ||
relevance 0 | ../test/test_torrent_info.cpp:475 | torrent_info constructor that takes an invalid bencoded buffer |
torrent_info constructor that takes an invalid bencoded buffer../test/test_torrent_info.cpp:475 | ||
relevance 0 | ../test/test_torrent_info.cpp:476 | verify_encoding with a string that triggers character replacement |
verify_encoding with a string that triggers character replacement../test/test_torrent_info.cpp:476 { "v2_non_multiple_piece_layer.torrent", errors::torrent_invalid_piece_layer},
{ "v2_piece_layer_invalid_file_hash.torrent", errors::torrent_invalid_piece_layer},
{ "v2_invalid_piece_layer.torrent", errors::torrent_invalid_piece_layer},
{ "v2_invalid_piece_layer_root.torrent", errors::torrent_invalid_piece_layer},
{ "v2_unknown_piece_layer_entry.torrent", errors::torrent_invalid_piece_layer},
{ "v2_invalid_piece_layer_size.torrent", errors::torrent_invalid_piece_layer},
{ "v2_bad_file_alignment.torrent", errors::torrent_inconsistent_files},
{ "v2_unordered_files.torrent", errors::invalid_bencoding},
{ "v2_overlong_integer.torrent", errors::invalid_bencoding},
{ "v2_missing_file_root_invalid_symlink.torrent", errors::torrent_missing_pieces_root},
{ "v2_large_file.torrent", errors::torrent_invalid_length},
{ "v2_large_offset.torrent", errors::too_many_pieces_in_torrent},
{ "v2_piece_size.torrent", errors::torrent_missing_piece_length},
{ "v2_invalid_pad_file.torrent", errors::torrent_invalid_pad_file},
{ "v2_zero_root.torrent", errors::torrent_missing_pieces_root},
{ "v2_zero_root_small.torrent", errors::torrent_missing_pieces_root},
};
} // anonymous namespace
TORRENT_TEST(add_tracker)
{
torrent_info ti(info_hash_t(sha1_hash(" ")));
TEST_EQUAL(ti.trackers().size(), 0);
ti.add_tracker("http://test.com/announce");
TEST_EQUAL(ti.trackers().size(), 1);
announce_entry ae = ti.trackers()[0];
TEST_EQUAL(ae.url, "http://test.com/announce");
ti.clear_trackers();
TEST_EQUAL(ti.trackers().size(), 0);
}
TORRENT_TEST(url_list_and_httpseeds)
{
entry info;
info["pieces"] = "aaaaaaaaaaaaaaaaaaaa";
info["name.utf-8"] = "test1";
info["name"] = "test__";
info["piece length"] = 16 * 1024;
info["length"] = 3245;
entry::list_type l;
l.push_back(entry("http://foo.com/bar1"));
l.push_back(entry("http://foo.com/bar1"));
l.push_back(entry("http://foo.com/bar2"));
entry const e(l);
entry torrent;
torrent["url-list"] = e;
| ||
relevance 0 | ../test/test_ssl.cpp:407 | test using a signed certificate with the wrong info-hash in DN |
test using a signed certificate with the wrong info-hash in DN../test/test_ssl.cpp:407 // in verifying peers
ctx.set_verify_mode(context::verify_none, ec);
if (ec)
{
std::printf("Failed to set SSL verify mode: %s\n"
, ec.message().c_str());
TEST_CHECK(!ec);
return false;
}
std::string certificate = combine_path("..", combine_path("ssl", "peer_certificate.pem"));
std::string private_key = combine_path("..", combine_path("ssl", "peer_private_key.pem"));
std::string dh_params = combine_path("..", combine_path("ssl", "dhparams.pem"));
if (flags & invalid_certificate)
{
certificate = combine_path("..", combine_path("ssl", "invalid_peer_certificate.pem"));
private_key = combine_path("..", combine_path("ssl", "invalid_peer_private_key.pem"));
}
if (flags & (valid_certificate | invalid_certificate))
{
std::printf("set_password_callback\n");
ctx.set_password_callback(
[](std::size_t, context::password_purpose) { return "test"; }
, ec);
if (ec)
{
std::printf("Failed to set certificate passphrase: %s\n"
, ec.message().c_str());
TEST_CHECK(!ec);
return false;
}
std::printf("use_certificate_file \"%s\"\n", certificate.c_str());
ctx.use_certificate_file(certificate, context::pem, ec);
if (ec)
{
std::printf("Failed to set certificate file: %s\n"
, ec.message().c_str());
TEST_CHECK(!ec);
return false;
}
std::printf("use_private_key_file \"%s\"\n", private_key.c_str());
ctx.use_private_key_file(private_key, context::pem, ec);
if (ec)
{
std::printf("Failed to set private key: %s\n"
, ec.message().c_str());
TEST_CHECK(!ec);
return false;
| ||
relevance 0 | ../test/test_ssl.cpp:509 | also test using a hash that refers to a valid torrent but that differs from the SNI hash |
also test using a hash that refers to a valid torrent
but that differs from the SNI hash../test/test_ssl.cpp:509 print_alerts(ses1, "ses1", true, true, &on_alert);
if (ec)
{
std::printf("Failed SSL handshake: %s\n"
, ec.message().c_str());
return false;
}
char handshake[] = "\x13" "BitTorrent protocol\0\0\0\0\0\0\0\x04"
" " // space for info-hash
"aaaaaaaaaaaaaaaaaaaa" // peer-id
"\0\0\0\x01\x02"; // interested
// fill in the info-hash
if (flags & valid_bittorrent_hash)
{
std::memcpy(handshake + 28, &t->info_hashes().v1[0], 20);
}
else
{
std::generate(handshake + 28, handshake + 48, &rand);
}
// fill in the peer-id
std::generate(handshake + 48, handshake + 68, &rand);
std::printf("bittorrent handshake\n");
boost::asio::write(ssl_sock, boost::asio::buffer(handshake, (sizeof(handshake) - 1)), ec);
print_alerts(ses1, "ses1", true, true, &on_alert);
if (ec)
{
std::printf("failed to write bittorrent handshake: %s\n"
, ec.message().c_str());
return false;
}
char buf[68];
std::printf("read bittorrent handshake\n");
boost::asio::read(ssl_sock, boost::asio::buffer(buf, sizeof(buf)), ec);
print_alerts(ses1, "ses1", true, true, &on_alert);
if (ec)
{
std::printf("failed to read bittorrent handshake: %s\n"
, ec.message().c_str());
return false;
}
if (memcmp(buf, "\x13" "BitTorrent protocol", 20) != 0)
{
std::printf("invalid bittorrent handshake\n");
return false;
| ||
relevance 0 | ../test/test_timestamp_history.cpp:54 | test the case where we have > 120 samples (and have the base delay actually be updated) |
test the case where we have > 120 samples (and have the base delay actually be updated)../test/test_timestamp_history.cpp:54 | ||
relevance 0 | ../test/test_timestamp_history.cpp:55 | test the case where a sample is lower than the history entry but not lower than the base |
test the case where a sample is lower than the history entry but not lower than the base../test/test_timestamp_history.cpp:55#include "libtorrent/aux_/timestamp_history.hpp"
TORRENT_TEST(timestamp_history)
{
using namespace lt;
aux::timestamp_history h;
TEST_EQUAL(h.add_sample(0x32, false), 0);
TEST_EQUAL(h.base(), 0x32);
TEST_EQUAL(h.add_sample(0x33, false), 0x1);
TEST_EQUAL(h.base(), 0x32);
TEST_EQUAL(h.add_sample(0x3433, false), 0x3401);
TEST_EQUAL(h.base(), 0x32);
TEST_EQUAL(h.add_sample(0x30, false), 0);
TEST_EQUAL(h.base(), 0x30);
// test that wrapping of the timestamp is properly handled
h.add_sample(0xfffffff3, false);
TEST_EQUAL(h.base(), 0xfffffff3);
}
| ||
relevance 0 | ../test/test_resolve_links.cpp:95 | test files with different piece size (negative test) |
test files with different piece size (negative test)../test/test_resolve_links.cpp:95 { "test2", "test1_pad_files", 0},
{ "test3", "test1_pad_files", 0},
{ "test2", "test1_single", 0},
// these are all padded. The first small file will accidentally also
// match, even though it's not tail padded, the following file is identical
{ "test2_pad_files", "test1_pad_files", 2},
{ "test3_pad_files", "test1_pad_files", 2},
{ "test3_pad_files", "test2_pad_files", 2},
{ "test1_pad_files", "test2_pad_files", 2},
{ "test1_pad_files", "test3_pad_files", 2},
{ "test2_pad_files", "test3_pad_files", 2},
// one might expect this to work, but since the tail of the single file
// torrent is not padded, the last piece hash won't match
{ "test1_pad_files", "test1_single", 0},
// if it's padded on the other hand, it will work
{ "test1_pad_files", "test1_single_padded", 1},
};
| ||
relevance 0 | ../test/test_resolve_links.cpp:98 | it would be nice to test resolving of more than just 2 files as well. like 3 single file torrents merged into one, resolving all 3 files. |
it would be nice to test resolving of more than just 2 files as well.
like 3 single file torrents merged into one, resolving all 3 files.../test/test_resolve_links.cpp:98 { "test2", "test1_single", 0},
// these are all padded. The first small file will accidentally also
// match, even though it's not tail padded, the following file is identical
{ "test2_pad_files", "test1_pad_files", 2},
{ "test3_pad_files", "test1_pad_files", 2},
{ "test3_pad_files", "test2_pad_files", 2},
{ "test1_pad_files", "test2_pad_files", 2},
{ "test1_pad_files", "test3_pad_files", 2},
{ "test2_pad_files", "test3_pad_files", 2},
// one might expect this to work, but since the tail of the single file
// torrent is not padded, the last piece hash won't match
{ "test1_pad_files", "test1_single", 0},
// if it's padded on the other hand, it will work
{ "test1_pad_files", "test1_single_padded", 1},
};
TORRENT_TEST(resolve_links)
{
std::string path = combine_path(parent_path(current_working_directory())
, "mutable_test_torrents");
for (int i = 0; i < int(sizeof(test_torrents)/sizeof(test_torrents[0])); ++i)
{
test_torrent_t const& e = test_torrents[i];
std::string p = combine_path(path, e.filename1) + ".torrent";
std::printf("loading %s\n", p.c_str());
std::shared_ptr<torrent_info> ti1 = std::make_shared<torrent_info>(p);
p = combine_path(path, e.filename2) + ".torrent";
std::printf("loading %s\n", p.c_str());
std::shared_ptr<torrent_info> ti2 = std::make_shared<torrent_info>(p);
std::printf("resolving\n");
resolve_links l(ti1);
l.match(ti2, ".");
aux::vector<resolve_links::link_t, file_index_t> const& links = l.get_links();
auto const num_matches = std::size_t(std::count_if(links.begin(), links.end()
, std::bind(&resolve_links::link_t::ti, _1)));
// some debug output in case the test fails
if (num_matches > e.expected_matches)
{
file_storage const& fs = ti1->files();
| ||
relevance 0 | ../test/test_fast_extension.cpp:1135 | test sending invalid requests (out of bound piece index, offsets and sizes) |
test sending invalid requests (out of bound piece index, offsets and
sizes)../test/test_fast_extension.cpp:1135 | ||
relevance 0 | ../test/test_tracker.cpp:60 | test scrape requests |
test scrape requests../test/test_tracker.cpp:60 | ||
relevance 0 | ../test/test_tracker.cpp:61 | test parse peers6 |
test parse peers6../test/test_tracker.cpp:61 | ||
relevance 0 | ../test/test_tracker.cpp:62 | test parse tracker-id |
test parse tracker-id../test/test_tracker.cpp:62 | ||
relevance 0 | ../test/test_tracker.cpp:63 | test parse failure-reason |
test parse failure-reason../test/test_tracker.cpp:63 | ||
relevance 0 | ../test/test_tracker.cpp:64 | test all failure paths, including invalid bencoding not a dictionary no files entry in scrape response no info-hash entry in scrape response malformed peers in peer list of dictionaries uneven number of bytes in peers and peers6 string responses |
test all failure paths, including
invalid bencoding
not a dictionary
no files entry in scrape response
no info-hash entry in scrape response
malformed peers in peer list of dictionaries
uneven number of bytes in peers and peers6 string responses../test/test_tracker.cpp:64#include "test_utils.hpp"
#include "udp_tracker.hpp"
#include "settings.hpp"
#include "test_utils.hpp"
#include "libtorrent/alert.hpp"
#include "libtorrent/peer_info.hpp" // for peer_list_entry
#include "libtorrent/alert_types.hpp"
#include "libtorrent/session.hpp"
#include "libtorrent/session_params.hpp"
#include "libtorrent/error_code.hpp"
#include "libtorrent/tracker_manager.hpp"
#include "libtorrent/http_tracker_connection.hpp" // for parse_tracker_response
#include "libtorrent/torrent_info.hpp"
#include "libtorrent/announce_entry.hpp"
#include "libtorrent/torrent.hpp"
#include "libtorrent/aux_/path.hpp"
#include "libtorrent/socket_io.hpp"
using namespace lt;
TORRENT_TEST(parse_hostname_peers)
{
char const response[] = "d5:peersld7:peer id20:aaaaaaaaaaaaaaaaaaaa"
"2:ip13:test_hostname4:porti1000eed"
"7:peer id20:bbbbabaababababababa2:ip12:another_host4:porti1001eeee";
error_code ec;
tracker_response resp = parse_tracker_response(response
, ec, {}, sha1_hash());
TEST_EQUAL(ec, error_code());
TEST_EQUAL(resp.peers.size(), 2);
if (resp.peers.size() == 2)
{
peer_entry const& e0 = resp.peers[0];
peer_entry const& e1 = resp.peers[1];
TEST_EQUAL(e0.hostname, "test_hostname");
TEST_EQUAL(e0.port, 1000);
TEST_EQUAL(e0.pid, peer_id("aaaaaaaaaaaaaaaaaaaa"));
TEST_EQUAL(e1.hostname, "another_host");
TEST_EQUAL(e1.port, 1001);
TEST_EQUAL(e1.pid, peer_id("bbbbabaababababababa"));
}
}
TORRENT_TEST(parse_peers4)
{
char const response[] = "d5:peers12:\x01\x02\x03\x04\x30\x10"
"\x09\x08\x07\x06\x20\x10" "e";
error_code ec;
| ||
relevance 0 | ../test/test_flags.cpp:163 | change to a different test setup. currently always paused. test_set_after_add(torrent_flags::paused); test_unset_after_add(torrent_flags::paused); |
change to a different test setup. currently always paused.
test_set_after_add(torrent_flags::paused);
test_unset_after_add(torrent_flags::paused);../test/test_flags.cpp:163{
// share-mode
test_add_and_get_flags(torrent_flags::share_mode);
test_set_after_add(torrent_flags::share_mode);
test_unset_after_add(torrent_flags::share_mode);
}
#endif
TORRENT_TEST(flag_apply_ip_filter)
{
// apply-ip-filter
test_add_and_get_flags(torrent_flags::apply_ip_filter);
test_set_after_add(torrent_flags::apply_ip_filter);
test_unset_after_add(torrent_flags::apply_ip_filter);
}
TORRENT_TEST(flag_paused)
{
// paused
test_add_and_get_flags(torrent_flags::paused);
}
TORRENT_TEST(flag_auto_managed)
{
// auto-managed
test_add_and_get_flags(torrent_flags::auto_managed);
test_set_after_add(torrent_flags::auto_managed);
test_unset_after_add(torrent_flags::auto_managed);
}
// super seeding mode is automatically turned off if we're not a seed
// since the posix_disk_io is not threaded, this will happen immediately
#if TORRENT_HAVE_MMAP
#ifndef TORRENT_DISABLE_SUPERSEEDING
TORRENT_TEST(flag_super_seeding)
{
// super-seeding
test_add_and_get_flags(torrent_flags::super_seeding);
test_unset_after_add(torrent_flags::super_seeding);
test_set_after_add(torrent_flags::super_seeding);
}
#endif
#endif
TORRENT_TEST(flag_sequential_download)
{
// sequential-download
test_add_and_get_flags(torrent_flags::sequential_download);
test_set_after_add(torrent_flags::sequential_download);
test_unset_after_add(torrent_flags::sequential_download);
}
| ||
relevance 0 | ../test/test_flags.cpp:205 | this test is flaky, since the torrent will become ready before asking for the flags, and by then stop_when_ready will have been cleared test_add_and_get_flags(torrent_flags::stop_when_ready); setting stop-when-ready when already stopped has no effect. |
this test is flaky, since the torrent will become ready before
asking for the flags, and by then stop_when_ready will have been cleared
test_add_and_get_flags(torrent_flags::stop_when_ready);
setting stop-when-ready when already stopped has no effect.../test/test_flags.cpp:205 | ||
relevance 0 | ../test/test_flags.cpp:209 | change to a different test setup. currently always paused. test_set_after_add(torrent_flags::stop_when_ready); |
change to a different test setup. currently always paused.
test_set_after_add(torrent_flags::stop_when_ready);../test/test_flags.cpp:209 test_set_after_add(torrent_flags::super_seeding);
}
#endif
#endif
TORRENT_TEST(flag_sequential_download)
{
// sequential-download
test_add_and_get_flags(torrent_flags::sequential_download);
test_set_after_add(torrent_flags::sequential_download);
test_unset_after_add(torrent_flags::sequential_download);
}
// the stop when ready flag will be cleared when the torrent is ready to start
// downloading.
// since the posix_disk_io is not threaded, this will happen immediately
#if TORRENT_HAVE_MMAP
TORRENT_TEST(flag_stop_when_ready)
{
// stop-when-ready
test_unset_after_add(torrent_flags::stop_when_ready);
}
#endif
TORRENT_TEST(flag_disable_dht)
{
test_add_and_get_flags(torrent_flags::disable_dht);
test_set_after_add(torrent_flags::disable_dht);
test_unset_after_add(torrent_flags::disable_dht);
}
TORRENT_TEST(flag_disable_lsd)
{
test_add_and_get_flags(torrent_flags::disable_lsd);
test_set_after_add(torrent_flags::disable_lsd);
test_unset_after_add(torrent_flags::disable_lsd);
}
TORRENT_TEST(flag_disable_pex)
{
test_add_and_get_flags(torrent_flags::disable_pex);
test_set_after_add(torrent_flags::disable_pex);
test_unset_after_add(torrent_flags::disable_pex);
}
| ||
relevance 0 | ../test/test_merkle_tree.cpp:233 | use structured bindings in C++17 |
use structured bindings in C++17../test/test_merkle_tree.cpp:233 mask[std::size_t(i)] = true;
t.load_sparse_tree(span<sha256_hash const>(f).subspan(first_piece, num_pieces), mask, empty_verified);
int const end_piece_layer = first_piece + merkle_num_leafs(num_pieces);
for (int i = 0; i < end_piece_layer; ++i)
{
TEST_CHECK(t.has_node(i));
TEST_CHECK(t.compare_node(i, f[i]));
}
for (int i = end_piece_layer; i < num_nodes; ++i)
{
TEST_CHECK(!t.has_node(i));
}
}
}
namespace {
void test_roundtrip(aux::merkle_tree const& t
, int const block_count
, int const blocks_per_piece)
{
aux::vector<bool> mask;
std::vector<sha256_hash> tree;
std::tie(tree, mask) = t.build_sparse_vector();
aux::merkle_tree t2(block_count, blocks_per_piece, f[0].data());
t2.load_sparse_tree(tree, mask, empty_verified);
TEST_CHECK(t.build_vector() == t2.build_vector());
for (int i = 0; i < int(t.size()); ++i)
{
TEST_EQUAL(t[i], t2[i]);
TEST_EQUAL(t.has_node(i), t2.has_node(i));
if (!t.has_node(i))
TEST_CHECK(t[i].is_all_zeros());
if (!t2.has_node(i))
TEST_CHECK(t2[i].is_all_zeros());
TEST_CHECK(t.compare_node(i, t2[i]));
TEST_CHECK(t2.compare_node(i, t[i]));
}
}
}
TORRENT_TEST(roundtrip_empty_tree)
{
aux::merkle_tree t(num_blocks, 1, f[0].data());
test_roundtrip(t, num_blocks, 1);
}
TORRENT_TEST(roundtrip_full_tree)
| ||
relevance 0 | ../test/test_merkle_tree.cpp:938 | add test for load_piece_layer() |
add test for load_piece_layer()../test/test_merkle_tree.cpp:938 | ||
relevance 0 | ../test/test_merkle_tree.cpp:939 | add test for add_hashes() with an odd number of blocks |
add test for add_hashes() with an odd number of blocks../test/test_merkle_tree.cpp:939 | ||
relevance 0 | ../test/test_merkle_tree.cpp:940 | add test for set_block() (setting the last block) with an odd number of blocks |
add test for set_block() (setting the last block) with an odd number of blocks../test/test_merkle_tree.cpp:940
for (int i = 0; i < 7; ++i)
TEST_EQUAL(t[i], f[i]);
}
// use a proof that ties the first piece node 3 (since we don't need it all
// the way to the root).
auto const result = t.add_hashes(127, pdiff(1), range(f, 127, 4), build_proof(f, 31, 3));
TEST_CHECK(result);
auto const& res = *result;
TEST_EQUAL(res.passed.size(), 0);
TEST_EQUAL(res.failed.size(), 0);
for (int i = 127; i < 127 + 4; ++i)
TEST_CHECK(t[i] == f[i]);
TEST_CHECK(t.verified_leafs() == none_set(num_blocks));
}
| ||
relevance 0 | ../test/test_bloom_filter.cpp:135 | test size() |
test size()../test/test_bloom_filter.cpp:135 | ||
relevance 0 | ../test/test_bloom_filter.cpp:136 | test clear() |
test clear()../test/test_bloom_filter.cpp:136 sha1_hash k("\x01\x00\x02\x00 ");
TEST_CHECK(!filter.find(k));
filter.set(k);
TEST_CHECK(filter.find(k));
std::uint8_t compare[4] = { 0x16, 0xff, 0x55, 0xaa};
bits_out = filter.to_string();
TEST_EQUAL(memcmp(compare, bits_out.c_str(), 4), 0);
}
} // anonymous namespace
TORRENT_TEST(bloom_filter)
{
test_set_and_get();
test_set_bits();
test_count_zeroes();
test_to_from_string();
}
| ||
relevance 0 | ../test/test_peer_list.cpp:1241 | test erasing peers |
test erasing peers../test/test_peer_list.cpp:1241 | ||
relevance 0 | ../test/test_peer_list.cpp:1242 | test update_peer_port with allow_multiple_connections_per_ip and without |
test update_peer_port with allow_multiple_connections_per_ip and without../test/test_peer_list.cpp:1242 | ||
relevance 0 | ../test/test_peer_list.cpp:1243 | test add i2p peers |
test add i2p peers../test/test_peer_list.cpp:1243 | ||
relevance 0 | ../test/test_peer_list.cpp:1244 | test allow_i2p_mixed |
test allow_i2p_mixed../test/test_peer_list.cpp:1244 | ||
relevance 0 | ../test/test_peer_list.cpp:1245 | test insert_peer failing with all error conditions |
test insert_peer failing with all error conditions../test/test_peer_list.cpp:1245 | ||
relevance 0 | ../test/test_peer_list.cpp:1246 | test IPv6 |
test IPv6../test/test_peer_list.cpp:1246 | ||
relevance 0 | ../test/test_peer_list.cpp:1247 | test connect_to_peer() failing |
test connect_to_peer() failing../test/test_peer_list.cpp:1247 | ||
relevance 0 | ../test/test_peer_list.cpp:1248 | test connection_closed |
test connection_closed../test/test_peer_list.cpp:1248 | ||
relevance 0 | ../test/test_peer_list.cpp:1249 | connect candidates recalculation when incrementing failcount |
connect candidates recalculation when incrementing failcount../test/test_peer_list.cpp:1249 | ||
relevance 0 | ../test/test_file_storage.cpp:1208 | test file attributes |
test file attributes../test/test_file_storage.cpp:1208 | ||
relevance 0 | ../test/test_file_storage.cpp:1209 | test symlinks |
test symlinks../test/test_file_storage.cpp:1209 | ||
relevance 0 | ../test/test_upnp.cpp:156 | store the log and verify that some key messages are there |
store the log and verify that some key messages are there../test/test_upnp.cpp:156 , portmap_protocol const protocol, error_code const& err
, portmap_transport, aux::listen_socket_handle const&) override
{
callback_info info = {mapping, port, err};
callbacks.push_back(info);
std::cout << "mapping: " << static_cast<int>(mapping)
<< ", port: " << port << ", IP: " << ip
<< ", proto: " << static_cast<int>(protocol)
<< ", error: \"" << err.message() << "\"\n";
}
#ifndef TORRENT_DISABLE_LOGGING
bool should_log_portmap(portmap_transport) const override
{
return true;
}
void log_portmap(portmap_transport, char const* msg
, aux::listen_socket_handle const&) const override
{
std::cout << "UPnP: " << msg << std::endl;
}
#endif
};
ip_interface pick_upnp_interface()
{
lt::io_context ios;
error_code ec;
std::vector<ip_route> const routes = enum_routes(ios, ec);
if (ec)
{
std::cerr << "failed to enumerate routes: " << ec.message() << '\n';
TEST_CHECK(false);
return {};
}
std::vector<ip_interface> const ifs = enum_net_interfaces(ios, ec);
if (ec)
{
std::cerr << "failed to enumerate network interfaces: " << ec.message() << '\n';
TEST_CHECK(false);
return {};
}
int idx = 0;
for (auto const& face : ifs)
{
if (!face.interface_address.is_v4()) continue;
std::cout << " - " << idx
<< ' ' << face.interface_address.to_string()
<< ' ' << int(static_cast<std::uint8_t>(face.state))
<< ' ' << static_cast<std::uint32_t>(face.flags)
<< ' ' << face.name << '\n';
| ||
relevance 0 | ../test/test_transfer.cpp:166 | these settings_pack tests belong in their own test |
these settings_pack tests belong in their own test../test/test_transfer.cpp:166 // to the time it will take to complete the test
pack.set_int(settings_pack::min_reconnect_time, 0);
pack.set_int(settings_pack::stop_tracker_timeout, 1);
pack.set_bool(settings_pack::announce_to_all_trackers, true);
pack.set_bool(settings_pack::announce_to_all_tiers, true);
// make sure we announce to both http and udp trackers
pack.set_bool(settings_pack::prefer_udp_trackers, false);
pack.set_bool(settings_pack::enable_outgoing_utp, false);
pack.set_bool(settings_pack::enable_incoming_utp, false);
pack.set_bool(settings_pack::enable_lsd, false);
pack.set_bool(settings_pack::enable_natpmp, false);
pack.set_bool(settings_pack::enable_upnp, false);
pack.set_bool(settings_pack::enable_dht, false);
pack.set_int(settings_pack::out_enc_policy, settings_pack::pe_disabled);
pack.set_int(settings_pack::in_enc_policy, settings_pack::pe_disabled);
pack.set_bool(settings_pack::allow_multiple_connections_per_ip, false);
pack.set_int(settings_pack::unchoke_slots_limit, 0);
ses1.apply_settings(pack);
TEST_CHECK(ses1.get_settings().get_int(settings_pack::unchoke_slots_limit) == 0);
pack.set_int(settings_pack::unchoke_slots_limit, -1);
ses1.apply_settings(pack);
TEST_CHECK(ses1.get_settings().get_int(settings_pack::unchoke_slots_limit) == -1);
pack.set_int(settings_pack::unchoke_slots_limit, 8);
ses1.apply_settings(pack);
TEST_CHECK(ses1.get_settings().get_int(settings_pack::unchoke_slots_limit) == 8);
ses2.apply_settings(pack);
torrent_handle tor1;
torrent_handle tor2;
create_directory("tmp1_transfer", ec);
std::ofstream file("tmp1_transfer/temporary");
std::shared_ptr<torrent_info> t = ::create_torrent(&file, "temporary", 32 * 1024, 13, false);
file.close();
TEST_CHECK(exists(combine_path("tmp1_transfer", "temporary")));
add_torrent_params params;
params.storage_mode = storage_mode;
params.flags &= ~torrent_flags::paused;
params.flags &= ~torrent_flags::auto_managed;
wait_for_listen(ses1, "ses1");
wait_for_listen(ses2, "ses2");
| ||
relevance 0 | ../src/pe_crypto.cpp:60 | it would be nice to get the literal working |
it would be nice to get the literal working../src/pe_crypto.cpp:60#include <algorithm>
#include <random>
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <boost/multiprecision/integer.hpp>
#include <boost/multiprecision/cpp_int.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#include "libtorrent/random.hpp"
#include "libtorrent/aux_/alloca.hpp"
#include "libtorrent/pe_crypto.hpp"
#include "libtorrent/hasher.hpp"
namespace libtorrent {
namespace mp = boost::multiprecision;
namespace {
key_t const dh_prime
("0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A63A36210000000000090563");
}
std::array<char, 96> export_key(key_t const& k)
{
std::array<char, 96> ret;
auto* begin = reinterpret_cast<std::uint8_t*>(ret.data());
std::uint8_t* end = mp::export_bits(k, begin, 8);
| ||
relevance 0 | ../src/pe_crypto.cpp:71 | it would be nice to be able to export to a fixed width field, so we wouldn't have to shift it later |
it would be nice to be able to export to a fixed width field, so
we wouldn't have to shift it later../src/pe_crypto.cpp:71#include "libtorrent/random.hpp"
#include "libtorrent/aux_/alloca.hpp"
#include "libtorrent/pe_crypto.hpp"
#include "libtorrent/hasher.hpp"
namespace libtorrent {
namespace mp = boost::multiprecision;
namespace {
key_t const dh_prime
("0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A63A36210000000000090563");
}
std::array<char, 96> export_key(key_t const& k)
{
std::array<char, 96> ret;
auto* begin = reinterpret_cast<std::uint8_t*>(ret.data());
std::uint8_t* end = mp::export_bits(k, begin, 8);
if (end < begin + 96)
{
int const len = int(end - begin);
#if defined __GNUC__ && __GNUC__ == 12
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
std::memmove(begin + 96 - len, begin, aux::numeric_cast<std::size_t>(len));
#if defined __GNUC__ && __GNUC__ == 12
#pragma GCC diagnostic pop
#endif
std::memset(begin, 0, aux::numeric_cast<std::size_t>(96 - len));
}
return ret;
}
void rc4_init(const unsigned char* in, std::size_t len, rc4 *state);
std::size_t rc4_encrypt(unsigned char *out, std::size_t outlen, rc4 *state);
// Set the prime P and the generator, generate local public key
dh_key_exchange::dh_key_exchange()
{
aux::array<std::uint8_t, 96> random_key;
aux::random_bytes({reinterpret_cast<char*>(random_key.data())
, static_cast<std::ptrdiff_t>(random_key.size())});
// create local key (random)
mp::import_bits(m_dh_local_secret, random_key.begin(), random_key.end());
// key = (2 ^ secret) % prime
m_dh_local_key = mp::powm(key_t(2), m_dh_local_secret, dh_prime);
| ||
relevance 0 | ../src/torrent.cpp:1941 | this could be optimized by looking up which files are complete and just look at those |
this could be optimized by looking up which files are
complete and just look at those../src/torrent.cpp:1941// TORRENT_ASSERT(picker().have_piece(i));
we_have(i);
}
}
set_state(torrent_status::checking_resume_data);
aux::vector<std::string, file_index_t> links;
#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
if (!m_torrent_file->similar_torrents().empty()
|| !m_torrent_file->collections().empty())
{
resolve_links res(m_torrent_file);
for (auto const& ih : m_torrent_file->similar_torrents())
{
std::shared_ptr<torrent> t = m_ses.find_torrent(info_hash_t(ih)).lock();
if (!t) continue;
// Only attempt to reuse files from torrents that are seeding.
if (!t->is_seed()) continue;
res.match(t->get_torrent_file(), t->save_path());
}
for (auto const& c : m_torrent_file->collections())
{
std::vector<std::shared_ptr<torrent>> ts = m_ses.find_collection(c);
for (auto const& t : ts)
{
// Only attempt to reuse files from torrents that are seeding.
| ||
relevance 0 | ../src/torrent.cpp:1954 | this could be optimized by looking up which files are complete and just look at those |
this could be optimized by looking up which files are
complete and just look at those../src/torrent.cpp:1954 {
resolve_links res(m_torrent_file);
for (auto const& ih : m_torrent_file->similar_torrents())
{
std::shared_ptr<torrent> t = m_ses.find_torrent(info_hash_t(ih)).lock();
if (!t) continue;
// Only attempt to reuse files from torrents that are seeding.
if (!t->is_seed()) continue;
res.match(t->get_torrent_file(), t->save_path());
}
for (auto const& c : m_torrent_file->collections())
{
std::vector<std::shared_ptr<torrent>> ts = m_ses.find_collection(c);
for (auto const& t : ts)
{
// Only attempt to reuse files from torrents that are seeding.
if (!t->is_seed()) continue;
res.match(t->get_torrent_file(), t->save_path());
}
}
std::vector<resolve_links::link_t> const& l = res.get_links();
if (!l.empty())
{
links.resize(m_torrent_file->files().num_files());
for (auto const& i : l)
{
if (!i.ti) continue;
links[i.file_idx] = combine_path(i.save_path
, i.ti->files().file_path(i.file_idx));
}
}
}
#endif // TORRENT_DISABLE_MUTABLE_TORRENTS
#if TORRENT_USE_ASSERTS
TORRENT_ASSERT(m_outstanding_check_files == false);
m_outstanding_check_files = true;
#endif
if (!m_add_torrent_params || !(m_add_torrent_params->flags & torrent_flags::no_verify_files))
{
m_ses.disk_thread().async_check_files(
m_storage, m_add_torrent_params ? m_add_torrent_params.get() : nullptr
, std::move(links), [self = shared_from_this()](status_t st, storage_error const& error)
{ self->on_resume_data_checked(st, error); });
| ||
relevance 0 | ../src/torrent.cpp:2715 | this pattern is repeated in a few places. Factor this into a function and generalize the concept of a torrent having a dedicated listen port |
this pattern is repeated in a few places. Factor this into
a function and generalize the concept of a torrent having a
dedicated listen port../src/torrent.cpp:2715 // not ready for peers. Except, if we don't have metadata,
// we need peers to download from
if (!m_files_checked && valid_metadata()) return;
if (!m_announce_to_lsd) return;
// private torrents are never announced on LSD
if (m_torrent_file->is_valid() && m_torrent_file->priv()) return;
#if TORRENT_USE_I2P
// i2p torrents are also never announced on LSD
// unless we allow mixed swarms
if (is_i2p() && !settings().get_bool(settings_pack::allow_i2p_mixed))
return;
#endif
if (is_paused()) return;
if (!m_ses.has_lsd()) return;
#ifdef TORRENT_SSL_PEERS
int port = is_ssl_torrent() ? m_ses.ssl_listen_port() : m_ses.listen_port();
#else
int port = m_ses.listen_port();
#endif
// announce with the local discovery service
m_torrent_file->info_hashes().for_each([&](sha1_hash const& ih, protocol_version)
{
m_ses.announce_lsd(ih, port);
});
}
#ifndef TORRENT_DISABLE_DHT
void torrent::dht_announce()
{
TORRENT_ASSERT(is_single_thread());
if (!m_ses.dht())
{
#ifndef TORRENT_DISABLE_LOGGING
debug_log("DHT: no dht initialized");
#endif
return;
}
if (!should_announce_dht())
{
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
#if TORRENT_USE_I2P
| ||
relevance 0 | ../src/torrent.cpp:3870 | add one peer per IP the hostname resolves to |
add one peer per IP the hostname resolves to../src/torrent.cpp:3870 catch (...) { handle_exception(); }
#endif
void torrent::on_peer_name_lookup(error_code const& e
, std::vector<address> const& host_list, int const port
, protocol_version const v) try
{
TORRENT_ASSERT(is_single_thread());
INVARIANT_CHECK;
COMPLETE_ASYNC("torrent::on_peer_name_lookup");
#ifndef TORRENT_DISABLE_LOGGING
if (e && should_log())
debug_log("peer name lookup error: %s", e.message().c_str());
#endif
if (e || m_abort || host_list.empty() || m_ses.is_aborted()) return;
tcp::endpoint host(host_list.front(), std::uint16_t(port));
if (m_ip_filter && m_ip_filter->access(host.address()) & ip_filter::blocked)
{
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
debug_log("blocked ip from tracker: %s", host.address().to_string().c_str());
}
#endif
if (m_ses.alerts().should_post<peer_blocked_alert>())
m_ses.alerts().emplace_alert<peer_blocked_alert>(get_handle()
, host, peer_blocked_alert::ip_filter);
return;
}
if (add_peer(host, peer_info::tracker, v == protocol_version::V2 ? pex_lt_v2 : pex_flags_t(0)))
{
state_updated();
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
debug_log("name-lookup add_peer() [ %s ] connect-candidates: %d"
, host.address().to_string().c_str()
, m_peer_list ? m_peer_list->num_connect_candidates() : -1);
}
#endif
}
update_want_peers();
}
| ||
relevance 0 | ../src/torrent.cpp:4562 | only do this if the piece size > 1 blocks This is a v2 torrent so we can request block level hashes. |
only do this if the piece size > 1 blocks
This is a v2 torrent so we can request block
level hashes.../src/torrent.cpp:4562 {
std::set<torrent_peer*> ret;
if (!blocks.empty() && !downloaders.empty())
{
for (auto const b : blocks) ret.insert(downloaders[std::size_t(b)]);
}
else
{
std::copy(downloaders.begin(), downloaders.end(), std::inserter(ret, ret.begin()));
}
return ret;
}();
// if this piece wasn't downloaded from peers, we just found it on disk.
// In that case, we should just consider it as "not-have" and there's no
// need to try to get higher fidelity hashes (yet)
bool const found_on_disk = peers.size() == 1 && peers.count(nullptr);
if (!torrent_file().info_hashes().has_v1() && blocks.empty() && !found_on_disk)
{
verify_block_hashes(index);
}
// the below code is penalizing peers that sent use bad data.
// increase the total amount of failed bytes
if (!found_on_disk)
{
if (blocks.empty())
add_failed_bytes(m_torrent_file->piece_size(index));
else
add_failed_bytes(static_cast<int>(blocks.size()) * default_block_size);
#ifndef TORRENT_DISABLE_EXTENSIONS
for (auto& ext : m_extensions)
{
ext->on_piece_failed(index);
}
#endif
// did we receive this piece from a single peer?
// if we know exactly which blocks failed the hash, we can also be certain
// that all peers in the list sent us bad data
bool const known_bad_peer = (!found_on_disk && peers.size() == 1) || !blocks.empty();
penalize_peers(peers, index, known_bad_peer);
}
// If m_storage isn't set here, it means we're shutting down
if (m_storage)
{
// it doesn't make much sense to fail to hash a piece
| ||
relevance 0 | ../src/torrent.cpp:7501 | come up with a better way of doing this, instead of an immediately invoked lambda expression. |
come up with a better way of doing this, instead of an
immediately invoked lambda expression.../src/torrent.cpp:7501 || peerinfo->confirmed_supports_utp))
{
sm = m_ses.utp_socket_manager();
}
// don't make a TCP connection if it's disabled
if (sm == nullptr && !settings().get_bool(settings_pack::enable_outgoing_tcp))
{
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
debug_log("discarding peer \"%s\": TCP connections disabled "
"[ supports-utp: %d ]", peerinfo->to_string().c_str()
, peerinfo->supports_utp);
}
#endif
return false;
}
}
aux::socket_type s = [&] {
#if TORRENT_USE_I2P
if (peerinfo->is_i2p_addr)
{
// It's not entirely obvious why this peer connection is not marked as
// one. The main feature of a peer connection is that whether or not we
// proxy it is configurable. When we use i2p, we want to always proxy
// everything via i2p.
aux::proxy_settings proxy;
proxy.hostname = settings().get_str(settings_pack::i2p_hostname);
proxy.port = std::uint16_t(settings().get_int(settings_pack::i2p_port));
proxy.type = settings_pack::i2p_proxy;
aux::socket_type ret = instantiate_connection(m_ses.get_context()
, proxy, nullptr, nullptr, false, false);
i2p_stream& str = boost::get<i2p_stream>(ret);
str.set_local_i2p_endpoint(m_ses.local_i2p_endpoint());
str.set_destination(static_cast<i2p_peer*>(peerinfo)->dest());
str.set_command(i2p_stream::cmd_connect);
str.set_session_id(m_ses.i2p_session());
return ret;
}
else
#endif
{
void* userdata = nullptr;
#ifdef TORRENT_SSL_PEERS
if (is_ssl_torrent())
{
| ||
relevance 0 | ../src/torrent.cpp:9091 | perhaps 0 should actually mean 0 |
perhaps 0 should actually mean 0../src/torrent.cpp:9091 // finished torrents may not change their queue positions, as it's set to
// -1
if ((m_abort || is_finished()) && p != no_pos) return;
TORRENT_ASSERT((p == no_pos) == is_finished()
|| (!m_auto_managed && p == no_pos)
|| (m_abort && p == no_pos)
|| (!m_added && p == no_pos));
if (p == m_sequence_number) return;
TORRENT_ASSERT(p >= no_pos);
state_updated();
m_ses.set_queue_position(this, p);
}
void torrent::set_max_uploads(int limit, bool const state_update)
{
TORRENT_ASSERT(is_single_thread());
if (limit <= 0) limit = (1 << 24) - 1;
if (int(m_max_uploads) == limit) return;
if (state_update) state_updated();
m_max_uploads = aux::numeric_cast<std::uint32_t>(limit);
#ifndef TORRENT_DISABLE_LOGGING
if (should_log() && state_update)
debug_log("*** set-max-uploads: %d", m_max_uploads);
#endif
if (state_update)
set_need_save_resume(torrent_handle::if_config_changed);
}
void torrent::set_max_connections(int limit, bool const state_update)
{
TORRENT_ASSERT(is_single_thread());
| ||
relevance 0 | ../src/torrent.cpp:9108 | perhaps 0 should actually mean 0 |
perhaps 0 should actually mean 0../src/torrent.cpp:9108
void torrent::set_max_uploads(int limit, bool const state_update)
{
TORRENT_ASSERT(is_single_thread());
if (limit <= 0) limit = (1 << 24) - 1;
if (int(m_max_uploads) == limit) return;
if (state_update) state_updated();
m_max_uploads = aux::numeric_cast<std::uint32_t>(limit);
#ifndef TORRENT_DISABLE_LOGGING
if (should_log() && state_update)
debug_log("*** set-max-uploads: %d", m_max_uploads);
#endif
if (state_update)
set_need_save_resume(torrent_handle::if_config_changed);
}
void torrent::set_max_connections(int limit, bool const state_update)
{
TORRENT_ASSERT(is_single_thread());
if (limit <= 0) limit = (1 << 24) - 1;
if (int(m_max_connections) == limit) return;
if (state_update) state_updated();
m_max_connections = aux::numeric_cast<std::uint32_t>(limit);
update_want_peers();
#ifndef TORRENT_DISABLE_LOGGING
if (should_log() && state_update)
debug_log("*** set-max-connections: %d", m_max_connections);
#endif
if (num_peers() > int(m_max_connections))
{
disconnect_peers(num_peers() - m_max_connections
, errors::too_many_connections);
}
if (state_update)
set_need_save_resume(torrent_handle::if_config_changed);
}
void torrent::set_upload_limit(int const limit)
{
set_limit_impl(limit, peer_connection::upload_channel);
#ifndef TORRENT_DISABLE_LOGGING
debug_log("*** set-upload-limit: %d", limit);
#endif
}
void torrent::set_download_limit(int const limit)
{
| ||
relevance 0 | ../src/torrent.cpp:11034 | instead of resorting the whole list, insert the peers directly into the right place |
instead of resorting the whole list, insert the peers
directly into the right place../src/torrent.cpp:11034 std::printf("timed out [average-piece-time: %d ms ]\n"
, m_average_piece_time);
#endif
}
// pick all blocks for this piece. the peers list is kept up to date
// and sorted. when we issue a request to a peer, its download queue
// time will increase and it may need to be bumped in the peers list,
// since it's ordered by download queue time
pick_time_critical_block(peers, ignore_peers
, peers_with_requests
, pi, &i, m_picker.get()
, blocks_in_piece, timed_out);
// put back the peers we ignored into the peer list for the next piece
if (!ignore_peers.empty())
{
peers.insert(peers.begin(), ignore_peers.begin(), ignore_peers.end());
ignore_peers.clear();
std::sort(peers.begin(), peers.end()
, [] (peer_connection const* lhs, peer_connection const* rhs)
{ return lhs->download_queue_time(16*1024) < rhs->download_queue_time(16*1024); });
}
// if this peer's download time exceeds 2 seconds, we're done.
// We don't want to build unreasonably long request queues
if (!peers.empty() && peers[0]->download_queue_time() > milliseconds(2000))
break;
}
// commit all the time critical requests
for (auto p : peers_with_requests)
{
p->send_block_requests();
}
}
#endif // TORRENT_DISABLE_STREAMING
std::set<std::string> torrent::web_seeds(web_seed_entry::type_t const type) const
{
TORRENT_ASSERT(is_single_thread());
std::set<std::string> ret;
for (auto const& s : m_web_seeds)
{
if (s.peer_info.banned) continue;
if (s.removed) continue;
if (s.type != type) continue;
ret.insert(s.url);
}
return ret;
| ||
relevance 0 | ../src/merkle_tree.cpp:110 | in C++20, use std::identity |
in C++20, use std::identity../src/merkle_tree.cpp:110 allocate_full();
merkle_validate_copy(t, m_tree, root(), m_block_verified);
load_verified_bits(verified);
optimize_storage();
optimize_storage_piece_layer();
}
void merkle_tree::clear()
{
m_tree.clear();
m_tree.shrink_to_fit();
m_block_verified.clear();
m_mode = mode_t::empty_tree;
}
namespace {
struct identity
{
bool operator()(bool b) const { return b; }
};
}
void merkle_tree::load_sparse_tree(span<sha256_hash const> t
, std::vector<bool> const& mask
, std::vector<bool> const& verified)
{
INVARIANT_CHECK;
TORRENT_ASSERT(mask.size() == size());
if (size() != mask.size()) return;
int const first_block = block_layer_start();
int const end_block = first_block + m_num_blocks;
TORRENT_ASSERT(first_block < int(mask.size()));
TORRENT_ASSERT(end_block <= int(mask.size()));
// if the mask covers all blocks, go straight to block_layer
// mode, and validate
if (std::all_of(mask.begin() + first_block, mask.begin() + end_block, identity()))
{
// the index in t that points to first_block
auto const block_index = std::count_if(mask.begin(), mask.begin() + first_block, identity());
// discrepancy
if (t.size() < block_index + m_num_blocks)
return clear();
| ||
relevance 0 | ../src/merkle_tree.cpp:319 | this can be optimized by using m_tree as storage to fill this tree into, and then clear it if the hashes fail |
this can be optimized by using m_tree as storage to fill this
tree into, and then clear it if the hashes fail../src/merkle_tree.cpp:319 {
INVARIANT_CHECK;
// as we set the hashes of interior nodes, we may be able to validate
// block hashes that we had since earlier. Any blocks that can be
// validated, and failed, are added to this list
add_hashes_result_t ret;
// we already have all hashes
if (m_mode == mode_t::block_layer)
{
// since we're already on the block layer mode, we have the whole
// tree, and we've already reported any pieces as passing that may
// have existed in the tree when we completed it. At this point no
// more pieces should be reported as passed
return ret;
}
allocate_full();
int const leaf_count = merkle_num_leafs(int(hashes.size()));
aux::vector<sha256_hash> tree(merkle_num_nodes(leaf_count));
std::copy(hashes.begin(), hashes.end(), tree.end() - leaf_count);
// the end of a file is a special case, we may need to pad the leaf layer
if (leaf_count > hashes.size())
{
int const leaf_layer_size = num_leafs();
// assuming uncle_hashes lead all the way to the root, they tell us
// how many layers down we are
int const insert_layer_size = leaf_count << uncle_hashes.size();
if (leaf_layer_size != insert_layer_size)
{
sha256_hash const pad_hash = merkle_pad(leaf_layer_size, insert_layer_size);
for (int i = int(hashes.size()); i < leaf_count; ++i)
tree[tree.end_index() - leaf_count + i] = pad_hash;
}
}
merkle_fill_tree(tree, leaf_count);
int const base_num_layers = merkle_num_layers(leaf_count);
// this is the index of the node where we'll insert the root of the
// subtree (tree). It's also the hash the uncle_hashes are here to prove
// is valid.
int const insert_root_idx = dest_start_idx >> base_num_layers;
// start with validating the proofs, and inserting them as we go.
if (!merkle_validate_and_insert_proofs(m_tree, insert_root_idx, tree[0], uncle_hashes))
return {};
| ||
relevance 0 | ../src/merkle_tree.cpp:367 | a piece outside of this range may also fail, if one of the uncle hashes is at the layer right above the block hashes |
a piece outside of this range may also fail, if one of the uncle
hashes is at the layer right above the block hashes../src/merkle_tree.cpp:367 int const insert_root_idx = dest_start_idx >> base_num_layers;
// start with validating the proofs, and inserting them as we go.
if (!merkle_validate_and_insert_proofs(m_tree, insert_root_idx, tree[0], uncle_hashes))
return {};
// first fill in the subtree of known hashes from the base layer
auto const num_leafs = merkle_num_leafs(m_num_blocks);
auto const first_leaf = merkle_first_leaf(num_leafs);
// this is the start of the leaf layer of "tree". We'll use this
// variable to step upwards towards the root
int source_cursor = int(tree.size()) - leaf_count;
// the running index in the loop
int dest_cursor = dest_start_idx;
// the number of tree levels in a piece hash. 0 means the block layer is
// the same as the piece layer
int const base = piece_levels();
for (int layer_size = leaf_count; layer_size != 0; layer_size /= 2)
{
for (int i = 0; i < layer_size; ++i)
{
int const dst_idx = dest_cursor + i;
int const src_idx = source_cursor + i;
if (has_node(dst_idx))
{
if (m_tree[dst_idx] != tree[src_idx])
{
// this must be a block hash because inner nodes are not filled in until
// they can be verified. This assert ensures we're at the
// leaf layer of the file tree
TORRENT_ASSERT(dst_idx >= first_leaf);
int const pos = dst_idx - first_leaf;
auto const piece = piece_index_t{pos >> m_blocks_per_piece_log} + file_piece_offset;
int const block = pos & ((1 << m_blocks_per_piece_log) - 1);
TORRENT_ASSERT(pos < m_num_blocks);
if (!ret.failed.empty() && ret.failed.back().first == piece)
ret.failed.back().second.push_back(block);
else
ret.failed.emplace_back(piece, std::vector<int>{block});
// now that this hash has been reported as failing, we
// can clear it. This will prevent it from being
// reported as failing again.
m_tree[dst_idx].clear();
}
else if (dst_idx >= first_leaf)
| ||
relevance 0 | ../src/merkle_tree.cpp:447 | instead of overwriting the root and comparing it against hashes[], write a function that *validates* a tree by just filling it up to the level below the root and then validates it. |
instead of overwriting the root and comparing it
against hashes[], write a function that *validates* a tree
by just filling it up to the level below the root and then
validates it.../src/merkle_tree.cpp:447 && dest_start_idx < first_piece_idx + num_pieces())
{
int const blocks_in_piece = 1 << base;
// it may now be possible to verify the hashes of previously received blocks
// try to verify as many child nodes of the received hashes as possible
for (int i = 0; i < int(hashes.size()); ++i)
{
int const piece = dest_start_idx + i;
if (piece - first_piece_idx >= num_pieces())
break;
// the first block in this piece
int const block_idx = merkle_get_first_child(piece, base);
int const block_end_idx = std::min(block_idx + blocks_in_piece, first_leaf + m_num_blocks);
if (std::any_of(m_tree.begin() + block_idx
, m_tree.begin() + block_end_idx
, [](sha256_hash const& h) { return h.is_all_zeros(); }))
continue;
merkle_fill_tree(m_tree, blocks_in_piece, block_idx);
if (m_tree[piece] != hashes[i])
{
merkle_clear_tree(m_tree, blocks_in_piece, block_idx);
// write back the correct hash
m_tree[piece] = hashes[i];
TORRENT_ASSERT(blocks_in_piece == blocks_per_piece());
// an empty blocks vector indicates that we don't have the
// block hashes, and we can't know which block failed
// this will cause the block hashes to be requested
ret.failed.emplace_back(piece_index_t{piece - first_piece_idx} + file_piece_offset
, std::vector<int>());
}
else
{
ret.passed.push_back(piece_index_t{piece - first_piece_idx} + file_piece_offset);
// record that these block hashes are correct!
int const leafs_start = block_idx - block_layer_start();
int const leafs_end = std::min(m_num_blocks, leafs_start + blocks_in_piece);
| ||
relevance 0 | ../src/merkle_tree.cpp:471 | this could be done more efficiently if bitfield had a function to set a range of bits |
this could be done more efficiently if bitfield had a function
to set a range of bits../src/merkle_tree.cpp:471 merkle_fill_tree(m_tree, blocks_in_piece, block_idx);
if (m_tree[piece] != hashes[i])
{
merkle_clear_tree(m_tree, blocks_in_piece, block_idx);
// write back the correct hash
m_tree[piece] = hashes[i];
TORRENT_ASSERT(blocks_in_piece == blocks_per_piece());
// an empty blocks vector indicates that we don't have the
// block hashes, and we can't know which block failed
// this will cause the block hashes to be requested
ret.failed.emplace_back(piece_index_t{piece - first_piece_idx} + file_piece_offset
, std::vector<int>());
}
else
{
ret.passed.push_back(piece_index_t{piece - first_piece_idx} + file_piece_offset);
// record that these block hashes are correct!
int const leafs_start = block_idx - block_layer_start();
int const leafs_end = std::min(m_num_blocks, leafs_start + blocks_in_piece);
for (int k = leafs_start; k < leafs_end; ++k)
m_block_verified.set_bit(k);
}
TORRENT_ASSERT((piece - first_piece_idx) >= 0);
}
}
optimize_storage();
return ret;
}
std::tuple<merkle_tree::set_block_result, int, int> merkle_tree::set_block(int const block_index
, sha256_hash const& h)
{
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
INVARIANT_CHECK;
#endif
TORRENT_ASSERT(block_index < m_num_blocks);
auto const num_leafs = merkle_num_leafs(m_num_blocks);
auto const first_leaf = merkle_first_leaf(num_leafs);
auto const block_tree_index = first_leaf + block_index;
if (blocks_verified(block_index, 1))
{
// if this block's hash is already known, check the passed-in hash against it
if (compare_node(block_tree_index, h))
return std::make_tuple(set_block_result::ok, block_index, 1);
else
return std::make_tuple(set_block_result::block_hash_failed, block_index, 1);
| ||
relevance 0 | ../src/merkle_tree.cpp:514 | use structured binding in C++17 |
use structured binding in C++17../src/merkle_tree.cpp:514 auto const first_leaf = merkle_first_leaf(num_leafs);
auto const block_tree_index = first_leaf + block_index;
if (blocks_verified(block_index, 1))
{
// if this block's hash is already known, check the passed-in hash against it
if (compare_node(block_tree_index, h))
return std::make_tuple(set_block_result::ok, block_index, 1);
else
return std::make_tuple(set_block_result::block_hash_failed, block_index, 1);
}
allocate_full();
m_tree[block_tree_index] = h;
// to avoid wasting a lot of time hashing nodes only to discover they
// cannot be verified, check first to see if the root of the largest
// computable subtree is known
int leafs_start;
int leafs_size;
int root_index;
std::tie(leafs_start, leafs_size, root_index) =
merkle_find_known_subtree(m_tree, block_index, m_num_blocks);
// if the root node is unknown the hashes cannot be verified yet
if (m_tree[root_index].is_all_zeros())
return std::make_tuple(set_block_result::unknown, leafs_start, leafs_size);
// save the root hash because merkle_fill_tree will overwrite it
sha256_hash const root = m_tree[root_index];
merkle_fill_tree(m_tree, leafs_size, first_leaf + leafs_start);
if (root != m_tree[root_index])
{
// hash failure, clear all the internal nodes
// the whole piece failed the hash check. Clear all block hashes
// in this piece and report a hash failure
merkle_clear_tree(m_tree, leafs_size, first_leaf + leafs_start);
m_tree[root_index] = root;
return std::make_tuple(set_block_result::hash_failed, leafs_start, leafs_size);
}
| ||
relevance 0 | ../src/merkle_tree.cpp:539 | this could be done more efficiently if bitfield had a function to set a range of bits |
this could be done more efficiently if bitfield had a function
to set a range of bits../src/merkle_tree.cpp:539 merkle_find_known_subtree(m_tree, block_index, m_num_blocks);
// if the root node is unknown the hashes cannot be verified yet
if (m_tree[root_index].is_all_zeros())
return std::make_tuple(set_block_result::unknown, leafs_start, leafs_size);
// save the root hash because merkle_fill_tree will overwrite it
sha256_hash const root = m_tree[root_index];
merkle_fill_tree(m_tree, leafs_size, first_leaf + leafs_start);
if (root != m_tree[root_index])
{
// hash failure, clear all the internal nodes
// the whole piece failed the hash check. Clear all block hashes
// in this piece and report a hash failure
merkle_clear_tree(m_tree, leafs_size, first_leaf + leafs_start);
m_tree[root_index] = root;
return std::make_tuple(set_block_result::hash_failed, leafs_start, leafs_size);
}
int const leafs_end = std::min(m_num_blocks, leafs_start + leafs_size);
for (int i = leafs_start; i < leafs_end; ++i)
m_block_verified.set_bit(i);
// attempting to optimize storage is quite costly, only do it if we have
// a reason to believe it might have an effect
if (block_index == m_num_blocks - 1 || !m_tree[block_tree_index + 1].is_all_zeros())
optimize_storage();
return std::make_tuple(set_block_result::ok, leafs_start, leafs_size);
}
std::size_t merkle_tree::size() const
{
return static_cast<std::size_t>(merkle_num_nodes(merkle_num_leafs(m_num_blocks)));
}
int merkle_tree::num_pieces() const
{
int const ps = blocks_per_piece();
TORRENT_ASSERT(ps > 0);
return (m_num_blocks + ps - 1) >> m_blocks_per_piece_log;
}
int merkle_tree::block_layer_start() const
{
int const num_leafs = merkle_num_leafs(m_num_blocks);
TORRENT_ASSERT(num_leafs > 0);
return merkle_first_leaf(num_leafs);
}
| ||
relevance 0 | ../src/ip_notifier.cpp:41 | simulator support |
simulator support../src/ip_notifier.cpp:41 from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "libtorrent/aux_/ip_notifier.hpp"
#include "libtorrent/assert.hpp"
#if defined TORRENT_BUILD_SIMULATOR
#elif TORRENT_USE_NETLINK
#include "libtorrent/netlink.hpp"
#include "libtorrent/socket.hpp"
#include <array>
#include <unordered_map>
#elif TORRENT_USE_SYSTEMCONFIGURATION || TORRENT_USE_SC_NETWORK_REACHABILITY
#include <SystemConfiguration/SystemConfiguration.h>
#elif defined TORRENT_WINDOWS
#include "libtorrent/aux_/throw.hpp"
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <iphlpapi.h>
#ifdef TORRENT_WINRT
#include <netioapi.h>
#endif
#include <mutex>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#endif
#include "libtorrent/aux_/netlink_utils.hpp"
namespace libtorrent { namespace aux {
namespace {
#if (TORRENT_USE_SYSTEMCONFIGURATION || TORRENT_USE_SC_NETWORK_REACHABILITY) && \
!defined TORRENT_BUILD_SIMULATOR
// common utilities for Mac and iOS
template <typename T> void CFRefRetain(T h) { CFRetain(h); }
template <typename T> void CFRefRelease(T h) { CFRelease(h); }
| ||
relevance 0 | ../src/peer_connection.cpp:1091 | this should be the global download rate |
this should be the global download rate../src/peer_connection.cpp:1091
int rate = 0;
// if we haven't received any data recently, the current download rate
// is not representative
if (aux::time_now() - m_last_piece.get(m_connect) > seconds(30) && m_download_rate_peak > 0)
{
rate = m_download_rate_peak;
}
else if (aux::time_now() - m_last_unchoked.get(m_connect) < seconds(5)
&& m_statistics.total_payload_upload() < 2 * 0x4000)
{
// if we have only been unchoked for a short period of time,
// we don't know what rate we can get from this peer. Instead of assuming
// the lowest possible rate, assume the average.
int peers_with_requests = int(stats_counters()[counters::num_peers_down_requests]);
// avoid division by 0
if (peers_with_requests == 0) peers_with_requests = 1;
rate = t->statistics().transfer_rate(stat::download_payload) / peers_with_requests;
}
else
{
// current download rate in bytes per seconds
rate = m_statistics.transfer_rate(stat::download_payload);
}
// avoid division by zero
if (rate < 50) rate = 50;
// average of current rate and peak
// rate = (rate + m_download_rate_peak) / 2;
return milliseconds((m_outstanding_bytes + extra_bytes
+ m_queued_time_critical * t->block_size() * 1000) / rate);
}
void peer_connection::add_stat(std::int64_t const downloaded, std::int64_t const uploaded)
{
TORRENT_ASSERT(is_single_thread());
m_statistics.add_stat(downloaded, uploaded);
}
sha1_hash peer_connection::associated_info_hash() const
{
std::shared_ptr<torrent> t = associated_torrent().lock();
TORRENT_ASSERT(t);
auto const& ih = t->info_hash();
// if protocol_v2 is set on the peer, this better be a v2 torrent,
// otherwise something isn't right
| ||
relevance 0 | ../src/peer_connection.cpp:3520 | sort the allowed fast set in priority order |
sort the allowed fast set in priority order../src/peer_connection.cpp:3520
// if the peer has the piece and we want
// to download it, request it
if (index < m_have_piece.end_index()
&& m_have_piece[index]
&& !t->has_piece_passed(index)
&& t->valid_metadata()
&& t->has_picker()
&& t->picker().piece_priority(index) > dont_download)
{
t->peer_is_interesting(*this);
}
}
std::vector<piece_index_t> const& peer_connection::allowed_fast()
{
TORRENT_ASSERT(is_single_thread());
std::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
return m_allowed_fast;
}
// decide whether this peer is a viable target for time-critical
// (deadline) piece requests. A peer qualifies only when it is
// unchoked, interesting, not overloaded with outstanding requests,
// in good standing, still connected, and the torrent is downloading.
bool peer_connection::can_request_time_critical() const
{
	TORRENT_ASSERT(is_single_thread());
	if (has_peer_choked()) return false;
	if (!is_interesting()) return false;
	// don't pile more than twice the desired queue depth onto one peer
	int const outstanding = int(m_download_queue.size())
		+ int(m_request_queue.size());
	if (outstanding > m_desired_queue_size * 2) return false;
	if (on_parole() || m_disconnecting) return false;
	std::shared_ptr<torrent> t = m_torrent.lock();
	TORRENT_ASSERT(t);
	if (t->upload_mode()) return false;
	// ignore snubbed peers, since they're not likely to return pieces
	// in a timely manner anyway
	return !m_snubbed;
}
bool peer_connection::make_time_critical(piece_block const& block)
{
TORRENT_ASSERT(is_single_thread());
auto const rit = std::find_if(m_request_queue.begin()
, m_request_queue.end(), aux::has_block(block));
if (rit == m_request_queue.end()) return false;
#if TORRENT_USE_ASSERTS
std::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
TORRENT_ASSERT(t->has_picker());
| ||
relevance 0 | ../src/part_file.cpp:300 | what do we do if someone is currently reading from the disk from this piece? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal |
what do we do if someone is currently reading from the disk
from this piece? does it matter? Since we won't actively erase the
data from disk, but it may be overwritten soon, it's probably not that
big of a deal../src/part_file.cpp:300 create_directories(m_path, ec);
if (ec) return {};
return aux::file_handle(fn, 0, mode);
}
return {};
}
}
catch (storage_error const& e)
{
ec = e.ec;
return {};
}
// release the part-file slot occupied by `piece`, if it has one.
// The slot is returned to the free list for reuse and the in-memory
// header is marked dirty so it gets re-flushed to disk.
void part_file::free_piece(piece_index_t const piece)
{
	std::lock_guard<std::mutex> guard(m_mutex);
	auto const it = m_piece_map.find(piece);
	if (it != m_piece_map.end())
	{
		m_free_slots.push_back(it->second);
		m_piece_map.erase(it);
		m_dirty_metadata = true;
	}
}
void part_file::move_partfile(std::string const& path, error_code& ec)
{
std::lock_guard<std::mutex> l(m_mutex);
flush_metadata_impl(ec);
if (ec) return;
if (!m_piece_map.empty())
{
std::string old_path = combine_path(m_path, m_name);
std::string new_path = combine_path(path, m_name);
rename(old_path, new_path, ec);
if (ec == boost::system::errc::no_such_file_or_directory)
ec.clear();
if (ec)
{
storage_error se;
aux::copy_file(old_path, new_path, se);
ec = se.ec;
if (ec) return;
remove(old_path, ec);
}
}
| ||
relevance 0 | ../src/part_file.cpp:412 | instead of rebuilding the whole file header and flushing it, update the slot entries as we go |
instead of rebuilding the whole file header
and flushing it, update the slot entries as we go../src/part_file.cpp:412 TORRENT_ASSERT(j->second == slot);
m_free_slots.push_back(j->second);
m_piece_map.erase(j);
m_dirty_metadata = true;
}
}
}
file_offset += block_to_copy;
piece_offset = 0;
size -= block_to_copy;
}
}
// public entry point: write the part-file piece-map header to disk
// under the part-file mutex. The actual serialization lives in
// flush_metadata_impl(); any failure is reported through ec.
void part_file::flush_metadata(error_code& ec)
{
	std::lock_guard<std::mutex> l(m_mutex);
	flush_metadata_impl(ec);
}
void part_file::flush_metadata_impl(error_code& ec)
{
// do we need to flush the metadata?
if (m_dirty_metadata == false) return;
if (m_piece_map.empty())
{
// if we don't have any pieces left in the
// part file, remove it
std::string const p = combine_path(m_path, m_name);
remove(p, ec);
if (ec == boost::system::errc::no_such_file_or_directory)
ec.clear();
return;
}
auto f = open_file(aux::open_mode::write | aux::open_mode::hidden, ec);
if (ec) return;
std::vector<char> header(static_cast<std::size_t>(m_header_size));
using namespace libtorrent::aux;
char* ptr = header.data();
write_uint32(m_max_pieces, ptr);
write_uint32(m_piece_size, ptr);
for (piece_index_t piece(0); piece < piece_index_t(m_max_pieces); ++piece)
{
auto const i = m_piece_map.find(piece);
| ||
relevance 0 | ../src/torrent_info.cpp:875 | this should be considered a failure, and the .torrent file rejected |
this should be considered a failure, and the .torrent file
rejected../src/torrent_info.cpp:875 std::string ext = extension(filename);
int cnt = 0;
for (;;)
{
++cnt;
char new_ext[50];
std::snprintf(new_ext, sizeof(new_ext), ".%d%s", cnt, ext.c_str());
filename = base + new_ext;
boost::crc_optimal<32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF, true, true> crc;
process_string_lowercase(crc, filename);
std::uint32_t const new_hash = crc.checksum();
if (files.find(new_hash) == files.end())
{
files.insert({new_hash, {i, 0}});
break;
}
++num_collisions;
if (num_collisions > 100)
{
}
}
copy_on_write();
m_files.rename_file(i, filename);
}
}
// replace this torrent's file layout with f while keeping the piece
// data mapping intact. f must describe exactly the same number of
// total bytes as the current layout; a mismatch asserts in debug
// builds and is silently ignored in release builds.
void torrent_info::remap_files(file_storage const& f)
{
	INVARIANT_CHECK;
	TORRENT_ASSERT(is_loaded());
	// the new specified file storage must have the exact
	// same size as the current file storage
	TORRENT_ASSERT(m_files.total_size() == f.total_size());
	if (m_files.total_size() != f.total_size()) return;
	// detach from any shared state before mutating m_files
	copy_on_write();
	m_files = f;
	// piece geometry always comes from the original (.torrent) layout
	m_files.set_num_pieces(m_orig_files->num_pieces());
	m_files.set_piece_length(m_orig_files->piece_length());
}
#if TORRENT_ABI_VERSION == 1
// standard constructor that parses a torrent file
torrent_info::torrent_info(entry const& torrent_file)
{
std::vector<char> tmp;
std::back_insert_iterator<std::vector<char>> out(tmp);
bencode(out, torrent_file);
| ||
relevance 0 | ../src/settings_pack.cpp:305 | deprecate this |
deprecate this../src/settings_pack.cpp:305 SET(outgoing_port, 0, nullptr),
SET(num_outgoing_ports, 0, nullptr),
SET(peer_dscp, 0x04, &session_impl::update_peer_dscp),
SET(active_downloads, 3, &session_impl::trigger_auto_manage),
SET(active_seeds, 5, &session_impl::trigger_auto_manage),
SET(active_checking, 1, &session_impl::trigger_auto_manage),
SET(active_dht_limit, 88, nullptr),
SET(active_tracker_limit, 1600, nullptr),
SET(active_lsd_limit, 60, nullptr),
SET(active_limit, 500, &session_impl::trigger_auto_manage),
DEPRECATED_SET(active_loaded_limit, 0, &session_impl::trigger_auto_manage),
SET(auto_manage_interval, 30, nullptr),
SET(seed_time_limit, 24 * 60 * 60, nullptr),
SET(auto_scrape_interval, 1800, nullptr),
SET(auto_scrape_min_interval, 300, nullptr),
SET(max_peerlist_size, 3000, nullptr),
SET(max_paused_peerlist_size, 1000, nullptr),
SET(min_announce_interval, 5 * 60, nullptr),
SET(auto_manage_startup, 60, nullptr),
SET(seeding_piece_quota, 20, nullptr),
SET(max_rejects, 50, nullptr),
SET(recv_socket_buffer_size, 0, &session_impl::update_socket_buffer_size),
SET(send_socket_buffer_size, 0, &session_impl::update_socket_buffer_size),
SET(max_peer_recv_buffer_size, 2 * 1024 * 1024, nullptr),
DEPRECATED_SET(file_checks_delay_per_block, 0, nullptr),
DEPRECATED2_SET(read_cache_line_size, 32, nullptr),
DEPRECATED2_SET(write_cache_line_size, 16, nullptr),
SET(optimistic_disk_retry, 10 * 60, nullptr),
SET(max_suggest_pieces, 16, nullptr),
SET(local_service_announce_interval, 5 * 60, nullptr),
SET(dht_announce_interval, 15 * 60, &session_impl::update_dht_announce_interval),
SET(udp_tracker_token_expiry, 60, nullptr),
DEPRECATED_SET(default_cache_min_age, 1, nullptr),
SET(num_optimistic_unchoke_slots, 0, nullptr),
DEPRECATED_SET(default_est_reciprocation_rate, 16000, nullptr),
DEPRECATED_SET(increase_est_reciprocation_rate, 20, nullptr),
DEPRECATED_SET(decrease_est_reciprocation_rate, 3, nullptr),
SET(max_pex_peers, 50, nullptr),
SET(tick_interval, 500, nullptr),
SET(share_mode_target, 3, nullptr),
SET(upload_rate_limit, 0, &session_impl::update_upload_rate),
SET(download_rate_limit, 0, &session_impl::update_download_rate),
DEPRECATED_SET(local_upload_rate_limit, 0, &session_impl::update_local_upload_rate),
DEPRECATED_SET(local_download_rate_limit, 0, &session_impl::update_local_download_rate),
SET(dht_upload_rate_limit, 8000, &session_impl::update_dht_upload_rate_limit),
SET(unchoke_slots_limit, 8, &session_impl::update_unchoke_limit),
DEPRECATED_SET(half_open_limit, 0, nullptr),
SET(connections_limit, 200, &session_impl::update_connections_limit),
SET(connections_slack, 10, nullptr),
SET(utp_target_delay, 100, nullptr),
SET(utp_gain_factor, 3000, nullptr),
| ||
relevance 0 | ../src/settings_pack.cpp:589 | it would be nice to reserve() these vectors up front |
it would be nice to reserve() these vectors up front../src/settings_pack.cpp:589 s.set_str(settings_pack::string_type_base | i, str_settings[i].default_value);
TORRENT_ASSERT(s.get_str(settings_pack::string_type_base + i) == str_settings[i].default_value);
}
for (int i = 0; i < settings_pack::num_int_settings; ++i)
{
s.set_int(settings_pack::int_type_base | i, int_settings[i].default_value);
TORRENT_ASSERT(s.get_int(settings_pack::int_type_base + i) == int_settings[i].default_value);
}
for (int i = 0; i < settings_pack::num_bool_settings; ++i)
{
s.set_bool(settings_pack::bool_type_base | i, bool_settings[i].default_value);
TORRENT_ASSERT(s.get_bool(settings_pack::bool_type_base + i) == bool_settings[i].default_value);
}
}
// build a settings_pack explicitly populated with every built-in
// default value (string, int and bool). String entries whose default
// is a null pointer have no meaningful default and are left unset.
settings_pack default_settings()
{
	settings_pack pack;
	for (int idx = 0; idx < settings_pack::num_string_settings; ++idx)
	{
		char const* def = str_settings[idx].default_value;
		if (def == nullptr) continue;
		pack.set_str(settings_pack::string_type_base + idx, def);
	}
	for (int idx = 0; idx < settings_pack::num_int_settings; ++idx)
		pack.set_int(settings_pack::int_type_base + idx
			, int_settings[idx].default_value);
	for (int idx = 0; idx < settings_pack::num_bool_settings; ++idx)
		pack.set_bool(settings_pack::bool_type_base + idx
			, bool_settings[idx].default_value);
	return pack;
}
void apply_pack(settings_pack const* pack, aux::session_settings& sett
, aux::session_impl* ses)
{
using fun_t = void (aux::session_impl::*)();
std::vector<fun_t> callbacks;
sett.bulk_set([&](aux::session_settings_single_thread& s)
{
apply_pack_impl(pack, s, ses ? &callbacks : nullptr);
});
// call the callbacks once all the settings have been applied, and
// only once per callback
| ||
relevance 0 | ../src/packet_buffer.cpp:157 | use compare_less_wrap for this comparison as well |
use compare_less_wrap for this comparison as well../src/packet_buffer.cpp:157 {
INVARIANT_CHECK;
TORRENT_ASSERT_VAL(size <= 0xffff, size);
std::uint32_t new_size = m_capacity == 0 ? 16 : m_capacity;
while (new_size < size)
new_size <<= 1;
aux::unique_ptr<packet_ptr[], index_type> new_storage(new packet_ptr[new_size]);
for (index_type i = m_first; i < (m_first + m_capacity); ++i)
new_storage[i & (new_size - 1)] = std::move(m_storage[i & (m_capacity - 1)]);
m_storage = std::move(new_storage);
m_capacity = new_size;
}
packet_ptr packet_buffer::remove(index_type idx)
{
INVARIANT_CHECK;
if (idx >= m_first + m_capacity)
return packet_ptr();
if (compare_less_wrap(idx, m_first, 0xffff))
return packet_ptr();
std::size_t const mask = m_capacity - 1;
packet_ptr old_value = std::move(m_storage[idx & mask]);
m_storage[idx & mask].reset();
if (old_value)
{
--m_size;
if (m_size == 0) m_last = m_first;
}
if (idx == m_first && m_size != 0)
{
++m_first;
for (index_type i = 0; i < m_capacity; ++i, ++m_first)
if (m_storage[m_first & mask]) break;
m_first &= 0xffff;
}
if (((idx + 1) & 0xffff) == m_last && m_size != 0)
{
--m_last;
for (index_type i = 0; i < m_capacity; ++i, --m_last)
if (m_storage[m_last & mask]) break;
++m_last;
m_last &= 0xffff;
| ||
relevance 0 | ../src/performance_counters.cpp:40 | move stats_counter_t out of counters |
move stats_counter_t out of counters../src/performance_counters.cpp:40 | ||
relevance 0 | ../src/performance_counters.cpp:41 | should bittorrent keep-alive messages have a counter too? |
should bittorrent keep-alive messages have a counter too?../src/performance_counters.cpp:41 | ||
relevance 0 | ../src/performance_counters.cpp:42 | It would be nice if this could be an internal type. default_disk_constructor depends on it now |
It would be nice if this could be an internal type. default_disk_constructor depends on it now../src/performance_counters.cpp:42THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "libtorrent/performance_counters.hpp"
#include "libtorrent/assert.hpp"
#include <cstring> // for memset
namespace libtorrent {
// zero-initialize all stats counters. When 64-bit atomics are
// lock-free, each slot is stored with relaxed ordering (no
// synchronization is needed during construction); otherwise the
// plain array is filled directly.
counters::counters() TORRENT_COUNTER_NOEXCEPT
{
#ifdef ATOMIC_LLONG_LOCK_FREE
	for (auto& counter : m_stats_counter)
		counter.store(0, std::memory_order_relaxed);
#else
	m_stats_counter.fill(0);
#endif
}
// copy-construct from another counters object. In the atomic build
// each slot is copied with relaxed loads/stores, so the copy is not
// a consistent cross-counter snapshot; the fallback build copies the
// whole array under the source's mutex instead.
counters::counters(counters const& c) TORRENT_COUNTER_NOEXCEPT
{
#ifdef ATOMIC_LLONG_LOCK_FREE
	for (int i = 0; i < m_stats_counter.end_index(); ++i)
		m_stats_counter[i].store(
			c.m_stats_counter[i].load(std::memory_order_relaxed)
			, std::memory_order_relaxed);
#else
	std::lock_guard<std::mutex> l(c.m_mutex);
	m_stats_counter = c.m_stats_counter;
#endif
}
counters& counters::operator=(counters const& c) & TORRENT_COUNTER_NOEXCEPT
{
if (&c == this) return *this;
#ifdef ATOMIC_LLONG_LOCK_FREE
for (int i = 0; i < m_stats_counter.end_index(); ++i)
m_stats_counter[i].store(
c.m_stats_counter[i].load(std::memory_order_relaxed)
, std::memory_order_relaxed);
| ||
relevance 0 | ../src/hash_picker.cpp:309 | use structured bindings in C++17 |
use structured bindings in C++17../src/hash_picker.cpp:309 , int const offset, sha256_hash const& h)
{
TORRENT_ASSERT(offset >= 0);
auto const f = m_files.file_index_at_piece(piece);
if (m_files.pad_file_at(f))
return { set_block_hash_result::result::success, 0, 0 };
auto& merkle_tree = m_merkle_trees[f];
piece_index_t const file_first_piece = m_files.piece_index_at_file(f);
std::int64_t const block_offset = static_cast<int>(piece) * std::int64_t(m_files.piece_length())
+ offset - m_files.file_offset(f);
int const block_index = aux::numeric_cast<int>(block_offset / default_block_size);
if (h.is_all_zeros())
{
TORRENT_ASSERT_FAIL();
return set_block_hash_result::block_hash_failed();
}
aux::merkle_tree::set_block_result result;
int leafs_index;
int leafs_size;
std::tie(result, leafs_index, leafs_size) = merkle_tree.set_block(block_index, h);
if (result == aux::merkle_tree::set_block_result::unknown)
return set_block_hash_result::unknown();
if (result == aux::merkle_tree::set_block_result::block_hash_failed)
return set_block_hash_result::block_hash_failed();
auto const status = (result == aux::merkle_tree::set_block_result::hash_failed)
? set_block_hash_result::result::piece_hash_failed
: set_block_hash_result::result::success;
int const blocks_per_piece = m_files.piece_length() / default_block_size;
return { status
, int(leafs_index - static_cast<int>(piece - file_first_piece) * blocks_per_piece)
, std::min(leafs_size, m_files.file_num_pieces(f) * blocks_per_piece - leafs_index) };
}
void hash_picker::hashes_rejected(hash_request const& req)
{
TORRENT_ASSERT(req.base == m_piece_layer && req.index % 512 == 0);
for (int i = req.index; i < req.index + req.count; i += 512)
{
m_piece_hash_requested[req.file][i / 512].last_request = min_time();
--m_piece_hash_requested[req.file][i / 512].num_requests;
}
| ||
relevance 0 | ../src/web_connection_base.cpp:72 | introduce a web-seed default class which has a low download priority |
introduce a web-seed default class which has a low download priority../src/web_connection_base.cpp:72 : peer_connection(pack)
, m_first_request(true)
, m_ssl(false)
, m_external_auth(web.auth)
, m_extra_headers(web.extra_headers)
, m_parser(http_parser::dont_parse_chunks)
, m_body_start(0)
{
TORRENT_ASSERT(&web.peer_info == pack.peerinfo);
// when going through a proxy, we don't necessarily have an endpoint here,
// since the proxy might be resolving the hostname, not us
TORRENT_ASSERT(web.endpoints.empty() || web.endpoints.front() == pack.endp);
INVARIANT_CHECK;
TORRENT_ASSERT(is_outgoing());
TORRENT_ASSERT(!m_torrent.lock()->is_upload_only());
// we only want left-over bandwidth
std::string protocol;
error_code ec;
std::tie(protocol, m_basic_auth, m_host, m_port, m_path)
= parse_url_components(web.url, ec);
TORRENT_ASSERT(!ec);
if (m_port == -1 && protocol == "http")
m_port = 80;
#if TORRENT_USE_SSL
if (protocol == "https")
{
m_ssl = true;
if (m_port == -1) m_port = 443;
}
#endif
if (!m_basic_auth.empty())
m_basic_auth = base64encode(m_basic_auth);
m_server_string = m_host;
aux::verify_encoding(m_server_string);
}
// web seeds use the user-configurable urlseed_timeout setting
// instead of the regular peer timeout
int web_connection_base::timeout() const
{
	// since this is a web seed, change the timeout
	// according to the settings.
	return m_settings.get_int(settings_pack::urlseed_timeout);
}
| ||
relevance 0 | ../src/enum_net.cpp:144 | in C++17, use __has_include for this. Other operating systems are likely to require this as well |
in C++17, use __has_include for this. Other operating systems are
likely to require this as well../src/enum_net.cpp:144#include <arpa/inet.h>
#include <cstring>
#include <cstdlib>
#include <unistd.h>
#include <sys/types.h>
#if defined TORRENT_ANDROID && !defined IFA_F_DADFAILED
#define IFA_F_DADFAILED 8
#endif
#endif
#if TORRENT_USE_IFADDRS
#include <ifaddrs.h>
#include <net/if.h>
#include <sys/ioctl.h>
#endif
#if TORRENT_USE_IFADDRS || TORRENT_USE_IFCONF || TORRENT_USE_NETLINK || TORRENT_USE_SYSCTL
#ifdef TORRENT_BEOS
#include <sys/sockio.h>
#endif
// capture this here where warnings are disabled (the macro generates warnings)
const unsigned long siocgifmtu = SIOCGIFMTU;
#endif
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#if defined(TORRENT_OS2) && !defined(IF_NAMESIZE)
#define IF_NAMESIZE IFNAMSIZ
#endif
namespace libtorrent {
namespace {
#if !defined TORRENT_WINDOWS && !defined TORRENT_BUILD_SIMULATOR
// RAII guard that closes a native socket descriptor when it goes out
// of scope. Copy and move are deleted so exactly one guard owns the
// descriptor for the duration of the enclosing scope.
struct socket_closer
{
	socket_closer(int s) : m_socket(s) {}
	socket_closer(socket_closer const&) = delete;
	socket_closer(socket_closer &&) = delete;
	socket_closer& operator=(socket_closer const&) = delete;
	socket_closer& operator=(socket_closer &&) = delete;
	~socket_closer() { ::close(m_socket); }
private:
	int m_socket;
};
#endif
#if !defined TORRENT_BUILD_SIMULATOR
| ||
relevance 0 | ../src/enum_net.cpp:268 | if we get here, the caller still assumes the error code is reported via errno |
if we get here, the caller still assumes the error code
is reported via errno../src/enum_net.cpp:268 ;
}
#endif
#if TORRENT_USE_NETLINK
int read_nl_sock(int sock, std::uint32_t const seq, std::uint32_t const pid
, std::function<void(nlmsghdr const*)> on_msg)
{
std::array<char, 4096> buf;
for (;;)
{
int const read_len = int(recv(sock, buf.data(), buf.size(), 0));
if (read_len < 0) return -1;
auto const* nl_hdr = reinterpret_cast<nlmsghdr const*>(buf.data());
int len = read_len;
for (; len > 0 && aux::nlmsg_ok(nl_hdr, len); nl_hdr = aux::nlmsg_next(nl_hdr, len))
{
if ((aux::nlmsg_ok(nl_hdr, read_len) == 0) || (nl_hdr->nlmsg_type == NLMSG_ERROR))
return -1;
// this function doesn't handle multiple requests at the same time
// so report an error if the message does not have the expected seq and pid
| ||
relevance 0 | ../src/enum_net.cpp:274 | if we get here, the caller still assumes the error code is reported via errno |
if we get here, the caller still assumes the error code
is reported via errno../src/enum_net.cpp:274#if TORRENT_USE_NETLINK
int read_nl_sock(int sock, std::uint32_t const seq, std::uint32_t const pid
, std::function<void(nlmsghdr const*)> on_msg)
{
std::array<char, 4096> buf;
for (;;)
{
int const read_len = int(recv(sock, buf.data(), buf.size(), 0));
if (read_len < 0) return -1;
auto const* nl_hdr = reinterpret_cast<nlmsghdr const*>(buf.data());
int len = read_len;
for (; len > 0 && aux::nlmsg_ok(nl_hdr, len); nl_hdr = aux::nlmsg_next(nl_hdr, len))
{
if ((aux::nlmsg_ok(nl_hdr, read_len) == 0) || (nl_hdr->nlmsg_type == NLMSG_ERROR))
return -1;
// this function doesn't handle multiple requests at the same time
// so report an error if the message does not have the expected seq and pid
if (nl_hdr->nlmsg_seq != seq || nl_hdr->nlmsg_pid != pid)
return -1;
if (nl_hdr->nlmsg_type == NLMSG_DONE) return 0;
on_msg(nl_hdr);
if ((nl_hdr->nlmsg_flags & NLM_F_MULTI) == 0) return 0;
}
}
// return 0;
}
int nl_dump_request(int const sock, std::uint32_t const seq
, nlmsghdr* const request_msg, std::function<void(nlmsghdr const*)> on_msg)
{
request_msg->nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST;
request_msg->nlmsg_seq = seq;
// in theory nlmsg_pid should be set to the netlink port ID (NOT the process ID)
// of the sender, but the kernel ignores this field so it is typically set to
// zero
request_msg->nlmsg_pid = 0;
if (::send(sock, request_msg, request_msg->nlmsg_len, 0) < 0)
return -1;
// get the socket's port ID so that we can verify it in the response
sockaddr_nl sock_addr;
socklen_t sock_addr_len = sizeof(sock_addr);
if (::getsockname(sock, reinterpret_cast<sockaddr*>(&sock_addr), &sock_addr_len) < 0)
return -1;
| ||
relevance 0 | ../src/utp_socket_manager.cpp:204 | this should not be heap allocated, sockets should be movable |
this should not be heap allocated, sockets should be movable../src/utp_socket_manager.cpp:204
// UTP_LOGV("incoming packet id:%d source:%s\n", id, print_endpoint(ep).c_str());
if (!m_sett.get_bool(settings_pack::enable_incoming_utp))
return false;
// if not found, see if it's a SYN packet, if it is,
// create a new utp_stream
if (ph->get_type() == ST_SYN)
{
// possible SYN flood. Just ignore
if (int(m_utp_sockets.size()) > m_sett.get_int(settings_pack::connections_limit) * 2)
return false;
TORRENT_ASSERT(m_new_connection == -1);
// create the new socket with this ID
m_new_connection = id;
// UTP_LOGV("not found, new connection id:%d\n", m_new_connection);
aux::socket_type c(aux::instantiate_connection(m_ios, aux::proxy_settings(), m_ssl_context, this, true, false));
utp_stream* str = nullptr;
#ifdef TORRENT_SSL_PEERS
if (is_ssl(c))
str = &boost::get<ssl_stream<utp_stream>>(c).next_layer();
else
#endif
str = boost::get<utp_stream>(&c);
TORRENT_ASSERT(str);
int const mtu = mtu_for_dest(ep.address());
str->get_impl()->init_mtu(mtu);
str->get_impl()->m_sock = std::move(socket);
bool const ret = str->get_impl()->incoming_packet(p, ep, receive_time);
if (!ret) return false;
m_last_socket = str->get_impl();
m_cb(std::move(c));
// the connection most likely changed its connection ID here
// we need to move it to the correct ID
return true;
}
if (ph->get_type() == ST_RESET) return false;
// #error send reset
return false;
}
void utp_socket_manager::subscribe_writable(utp_socket_impl* s)
| ||
relevance 0 | ../src/ut_metadata.cpp:281 | we really need to increment the refcounter on the torrent while this buffer is still in the peer's send buffer |
we really need to increment the refcounter on the torrent
while this buffer is still in the peer's send buffer../src/ut_metadata.cpp:281 int const offset = piece * 16 * 1024;
metadata = m_tp.metadata().data() + offset;
metadata_piece_size = std::min(
int(m_tp.metadata().size()) - offset, 16 * 1024);
TORRENT_ASSERT(metadata_piece_size > 0);
TORRENT_ASSERT(offset >= 0);
TORRENT_ASSERT(offset + metadata_piece_size <= m_tp.metadata().size());
}
char msg[200];
char* header = msg;
char* p = &msg[6];
int const len = bencode(p, e);
int const total_size = 2 + len + metadata_piece_size;
namespace io = aux;
io::write_uint32(total_size, header);
io::write_uint8(bt_peer_connection::msg_extended, header);
io::write_uint8(m_message_index, header);
m_pc.send_buffer({msg, len + 6});
if (metadata_piece_size)
{
m_pc.append_const_send_buffer(
span<char>(const_cast<char*>(metadata), metadata_piece_size), metadata_piece_size);
}
m_pc.stats_counters().inc_stats_counter(counters::num_outgoing_extended);
m_pc.stats_counters().inc_stats_counter(counters::num_outgoing_metadata);
}
bool on_extended(int const length
, int const extended_msg, span<char const> body) override
{
if (extended_msg != 2) return false;
if (m_message_index == 0) return false;
if (length > 17 * 1024)
{
#ifndef TORRENT_DISABLE_LOGGING
m_pc.peer_log(peer_log_alert::incoming_message, "UT_METADATA"
, "packet too big %d", length);
#endif
m_pc.disconnect(errors::invalid_metadata_message, operation_t::bittorrent, peer_connection_interface::peer_error);
return true;
}
if (!m_pc.packet_finished()) return true;
error_code ec;
bdecode_node msg = bdecode(body, ec);
if (msg.type() != bdecode_node::dict_t)
| ||
relevance 0 | ../src/file_storage.cpp:457 | maybe it would be nice to have a better index here |
maybe it would be nice to have a better index here../src/file_storage.cpp:457 // find the file iterator and file offset
aux::file_entry target;
target.offset = aux::numeric_cast<std::uint64_t>(offset);
TORRENT_ASSERT(!compare_file_offset(target, m_files.front()));
auto file_iter = std::upper_bound(
m_files.begin(), m_files.end(), target, compare_file_offset);
TORRENT_ASSERT(file_iter != m_files.begin());
--file_iter;
return file_index_t{int(file_iter - m_files.begin())};
}
// map a piece index to the file that contains the piece's first byte
file_index_t file_storage::file_index_at_piece(piece_index_t const piece) const
{
	return file_index_at_offset(static_cast<int>(piece) * std::int64_t(piece_length()));
}
// look up the file whose merkle root hash equals root_hash.
// Returns file_index_t{-1} when no file matches. Linear scan over
// all files.
file_index_t file_storage::file_index_for_root(sha256_hash const& root_hash) const
{
	file_index_t found{-1};
	for (file_index_t const idx : file_range())
	{
		if (root(idx) != root_hash) continue;
		found = idx;
		break;
	}
	return found;
}
// map a file index to the piece containing the file's first byte
piece_index_t file_storage::piece_index_at_file(file_index_t f) const
{
	return piece_index_t{aux::numeric_cast<int>(file_offset(f) / piece_length())};
}
#if TORRENT_ABI_VERSION <= 2
// deprecated accessor kept for ABI <= 2: raw pointer to the stored
// file name of `index`. Pair with file_name_len() to obtain its
// length.
char const* file_storage::file_name_ptr(file_index_t const index) const
{
	return m_files[index].name;
}
// deprecated accessor kept for ABI <= 2: length of the name returned
// by file_name_ptr(). Returns -1 when the entry's name_len carries
// the name_is_owned sentinel, meaning no explicit length is stored.
int file_storage::file_name_len(file_index_t const index) const
{
	auto const len = m_files[index].name_len;
	if (len == aux::file_entry::name_is_owned) return -1;
	return len;
}
#endif
std::vector<file_slice> file_storage::map_block(piece_index_t const piece
, std::int64_t const offset, std::int64_t size) const
{
TORRENT_ASSERT_PRECOND(piece >= piece_index_t{0});
TORRENT_ASSERT_PRECOND(piece < end_piece());
| ||
relevance 0 | ../src/file_storage.cpp:1242 | this would be more efficient if m_paths was sorted first, such that a lower path index always meant sorted-before |
this would be more efficient if m_paths was sorted first, such
that a lower path index always meant sorted-before../src/file_storage.cpp:1242 }
void file_storage::canonicalize_impl(bool const backwards_compatible)
{
TORRENT_ASSERT(piece_length() >= 16 * 1024);
// use this vector to track the new ordering of files
// this allows the use of STL algorithms despite them
// not supporting a custom swap functor
aux::vector<file_index_t, file_index_t> new_order(end_file());
for (auto i : file_range())
new_order[i] = i;
// remove any existing pad files
{
auto pad_begin = std::partition(new_order.begin(), new_order.end()
, [this](file_index_t i) { return !m_files[i].pad_file; });
new_order.erase(pad_begin, new_order.end());
}
// sort files by path/name
std::sort(new_order.begin(), new_order.end()
, [this](file_index_t l, file_index_t r)
{
// assuming m_paths are unique!
auto const& lf = m_files[l];
auto const& rf = m_files[r];
if (lf.path_index != rf.path_index)
{
int const ret = path_compare(m_paths[lf.path_index], lf.filename()
, m_paths[rf.path_index], rf.filename());
if (ret != 0) return ret < 0;
}
return lf.filename() < rf.filename();
});
aux::vector<aux::file_entry, file_index_t> new_files;
aux::vector<char const*, file_index_t> new_file_hashes;
aux::vector<std::time_t, file_index_t> new_mtime;
// reserve enough space for the worst case after padding
new_files.reserve(new_order.size() * 2 - 1);
if (!m_file_hashes.empty())
new_file_hashes.reserve(new_order.size() * 2 - 1);
if (!m_mtime.empty())
new_mtime.reserve(new_order.size() * 2 - 1);
// re-compute offsets and insert pad files as necessary
std::int64_t off = 0;
| ||
relevance 0 | ../src/file_storage.cpp:1345 | in C++17 this could be string_view |
in C++17 this could be string_view../src/file_storage.cpp:1345 add_pad_file(i);
}
m_files = std::move(new_files);
m_file_hashes = std::move(new_file_hashes);
m_mtime = std::move(new_mtime);
m_total_size = off;
}
void file_storage::sanitize_symlinks()
{
// symlinks are unusual, this function is optimized assuming there are no
// symbolic links in the torrent. If we find one symbolic link, we'll
// build the hash table of files it's allowed to refer to, but don't pay
// that price up-front.
std::unordered_map<std::string, file_index_t> file_map;
bool file_map_initialized = false;
// lazily instantiated set of all valid directories a symlink may point to
std::unordered_set<std::string> dir_map;
bool dir_map_initialized = false;
// symbolic links that points to directories
std::unordered_map<std::string, std::string> dir_links;
// we validate symlinks in (potentially) 2 passes over the files.
// remaining symlinks to validate after the first pass
std::vector<file_index_t> symlinks_to_validate;
for (auto const i : file_range())
{
if (!(file_flags(i) & file_storage::flag_symlink)) continue;
if (!file_map_initialized)
{
for (auto const j : file_range())
file_map.insert({internal_file_path(j), j});
file_map_initialized = true;
}
aux::file_entry const& fe = m_files[i];
TORRENT_ASSERT(fe.symlink_index < int(m_symlinks.size()));
// symlink targets are only allowed to point to files or directories in
// this torrent.
{
std::string target = m_symlinks[fe.symlink_index];
if (is_complete(target))
{
| ||
relevance 0 | ../src/random.cpp:141 | improve calling RAND_bytes multiple times, using fallback for now |
improve calling RAND_bytes multiple times, using fallback for now../src/random.cpp:141#else
std::generate(buffer.begin(), buffer.end(), [] { return char(random(0xff)); });
#endif
}
// fill `buffer` with cryptographically strong random bytes, using
// whichever entropy source this build was configured with. Sources
// that can fail report errors by throwing system_error. Builds with
// no usable entropy source fail to compile (#error below).
void crypto_random_bytes(span<char> buffer)
{
#ifdef TORRENT_BUILD_SIMULATOR
	// In the simulator we want deterministic random numbers
	std::generate(buffer.begin(), buffer.end(), [] { return char(random(0xff)); });
#elif TORRENT_USE_CNG
	// windows, Cryptography API: Next Generation
	aux::cng_gen_random(buffer);
#elif TORRENT_USE_CRYPTOAPI
	// windows
	aux::crypt_gen_random(buffer);
#elif defined TORRENT_USE_LIBCRYPTO && !defined TORRENT_USE_WOLFSSL
	// wolfSSL uses wc_RNG_GenerateBlock as the internal function for the
	// openssl compatibility layer. This function API does not support
	// an arbitrary buffer size (openssl does), it is limited by the
	// constant RNG_MAX_BLOCK_LEN.
	// openssl
	int r = RAND_bytes(reinterpret_cast<unsigned char*>(buffer.data())
		, int(buffer.size()));
	if (r != 1) aux::throw_ex<system_error>(errors::no_entropy);
#elif TORRENT_USE_GETRANDOM
	ssize_t const r = ::getrandom(buffer.data(), static_cast<std::size_t>(buffer.size()), 0);
	if (r == ssize_t(buffer.size())) return;
	// ENOSYS means the kernel lacks getrandom(); fall back to
	// /dev/random below. Any other error is fatal.
	if (r == -1 && errno != ENOSYS) aux::throw_ex<system_error>(error_code(errno, generic_category()));
	static dev_random dev;
	dev.read(buffer);
#elif TORRENT_USE_DEV_RANDOM
	static dev_random dev;
	dev.read(buffer);
#else

#if TORRENT_BROKEN_RANDOM_DEVICE
	// even pseudo random numbers rely on being able to seed the random
	// generator
#error "no entropy source available"
#else
#ifdef TORRENT_I_WANT_INSECURE_RANDOM_NUMBERS
	// explicitly opted-in fallback: NOT cryptographically secure
	std::generate(buffer.begin(), buffer.end(), [] { return char(random(0xff)); });
#else
#error "no secure entropy source available. If you really want insecure random numbers, define TORRENT_I_WANT_INSECURE_RANDOM_NUMBERS"
#endif
#endif

#endif
}
}
| ||
relevance 0 | ../src/mmap_disk_io.cpp:578 | in the future, propagate exceptions back to the handlers |
in the future, propagate exceptions back to the handlers../src/mmap_disk_io.cpp:578#if DEBUG_DISK_THREAD
{
std::unique_lock<std::mutex> l(m_job_mutex);
DLOG("perform_job job: %s ( %s%s) piece: %d offset: %d outstanding: %d\n"
, job_action_name[j->action]
, (j->flags & mmap_disk_job::fence) ? "fence ": ""
, (j->flags & mmap_disk_job::force_copy) ? "force_copy ": ""
, static_cast<int>(j->piece), j->d.io.offset
, j->storage ? j->storage->num_outstanding_jobs() : -1);
}
#endif
std::shared_ptr<mmap_storage> storage = j->storage;
TORRENT_ASSERT(static_cast<int>(j->action) < int(job_functions.size()));
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
// call disk function
status_t ret = status_t::no_error;
try
{
int const idx = static_cast<int>(j->action);
ret = (this->*(job_functions[static_cast<std::size_t>(idx)]))(j);
}
catch (boost::system::system_error const& err)
{
ret = status_t::fatal_disk_error;
j->error.ec = err.code();
j->error.operation = operation_t::exception;
}
catch (std::bad_alloc const&)
{
ret = status_t::fatal_disk_error;
j->error.ec = errors::no_memory;
j->error.operation = operation_t::exception;
}
catch (std::exception const&)
{
ret = status_t::fatal_disk_error;
j->error.ec = boost::asio::error::fault;
j->error.operation = operation_t::exception;
}
// note that -2 errors are OK
TORRENT_ASSERT(ret != status_t::fatal_disk_error
|| (j->error.ec && j->error.operation != operation_t::unknown));
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
| ||
relevance 0 | ../src/mmap_disk_io.cpp:1017 | this is potentially very expensive. One way to solve it would be to have a fence for just this one piece. but it hardly seems worth the complexity and cost just for the edge case of receiving a corrupt piece |
this is potentially very expensive. One way to solve
it would be to have a fence for just this one piece.
but it hardly seems worth the complexity and cost just for the edge
case of receiving a corrupt piece../src/mmap_disk_io.cpp:1017 aux::mmap_disk_job* j = m_job_pool.allocate_job(aux::job_action_t::file_priority);
j->storage = m_torrents[storage]->shared_from_this();
j->argument = std::move(prios);
j->callback = std::move(handler);
add_fence_job(j);
}
void mmap_disk_io::async_clear_piece(storage_index_t const storage
, piece_index_t const index, std::function<void(piece_index_t)> handler)
{
aux::mmap_disk_job* j = m_job_pool.allocate_job(aux::job_action_t::clear_piece);
j->storage = m_torrents[storage]->shared_from_this();
j->piece = index;
j->callback = std::move(handler);
// regular jobs are not guaranteed to be executed in-order
// since clear piece must guarantee that all write jobs that
// have been issued finish before the clear piece job completes
| ||
relevance 0 | ../src/mmap_disk_io.cpp:1022 | Perhaps the job queue could be traversed and all jobs for this piece could be cancelled. If there are no threads currently writing to this piece, we could skip the fence altogether |
Perhaps the job queue could be traversed and all jobs for this
piece could be cancelled. If there are no threads currently writing
to this piece, we could skip the fence altogether../src/mmap_disk_io.cpp:1022 j->storage = m_torrents[storage]->shared_from_this();
j->argument = std::move(prios);
j->callback = std::move(handler);
add_fence_job(j);
}
// Asynchronously discard any partially-downloaded data for `piece` on the
// given storage. `handler` is invoked with the piece index once the job
// has completed.
void mmap_disk_io::async_clear_piece(storage_index_t const storage
, piece_index_t const index, std::function<void(piece_index_t)> handler)
{
aux::mmap_disk_job* j = m_job_pool.allocate_job(aux::job_action_t::clear_piece);
j->storage = m_torrents[storage]->shared_from_this();
j->piece = index;
j->callback = std::move(handler);
// regular jobs are not guaranteed to be executed in-order
// since clear piece must guarantee that all write jobs that
// have been issued finish before the clear piece job completes
// it is issued as a fence job, which waits for all outstanding jobs
add_fence_job(j);
}
status_t mmap_disk_io::do_hash(aux::mmap_disk_job* j)
{
// we're not using a cache. This is the simple path
// just read straight from the file
TORRENT_ASSERT(m_magic == 0x1337);
bool const v1 = bool(j->flags & disk_interface::v1_hash);
bool const v2 = !j->d.h.block_hashes.empty();
int const piece_size = v1 ? j->storage->files().piece_size(j->piece) : 0;
int const piece_size2 = v2 ? j->storage->files().piece_size2(j->piece) : 0;
int const blocks_in_piece = v1 ? (piece_size + default_block_size - 1) / default_block_size : 0;
int const blocks_in_piece2 = v2 ? j->storage->files().blocks_in_piece2(j->piece) : 0;
aux::open_mode_t const file_mode = file_mode_for_job(j);
TORRENT_ASSERT(!v2 || int(j->d.h.block_hashes.size()) >= blocks_in_piece2);
TORRENT_ASSERT(v1 || v2);
hasher h;
int ret = 0;
int offset = 0;
int const blocks_to_read = std::max(blocks_in_piece, blocks_in_piece2);
time_point const start_time = clock_type::now();
for (int i = 0; i < blocks_to_read; ++i)
{
bool const v2_block = i < blocks_in_piece2;
DLOG("do_hash: reading (piece: %d block: %d)\n", int(j->piece), i);
| ||
relevance 0 | ../src/session.cpp:540 | In C++17, use if constexpr instead |
In C++17, use if constexpr instead../src/session.cpp:540 {}
session_proxy::session_proxy(session_proxy const&) = default;
session_proxy& session_proxy::operator=(session_proxy const&) & = default;
session_proxy::session_proxy(session_proxy&&) noexcept = default;
session_proxy& session_proxy::operator=(session_proxy&&) & noexcept = default;
// Destructor: blocks until the session thread has shut down, but only if
// this proxy holds the last reference to it. Copies of the proxy simply
// drop their reference without joining.
session_proxy::~session_proxy()
{
// use_count() == 1 means we are the last owner, so we are responsible
// for joining the thread
if (m_thread && m_thread.use_count() == 1)
{
#if defined TORRENT_ASIO_DEBUGGING
wait_for_asio_handlers();
#endif
m_thread->join();
}
}
// Factory for the default disk I/O subsystem. Selects the memory-mapped
// implementation when the platform supports it and the build is 64-bit;
// otherwise falls back to the portable posix implementation.
TORRENT_EXPORT std::unique_ptr<disk_interface> default_disk_io_constructor(
io_context& ios, settings_interface const& sett, counters& cnt)
{
#if TORRENT_HAVE_MMAP || TORRENT_HAVE_MAP_VIEW_OF_FILE
#include "libtorrent/aux_/disable_deprecation_warnings_push.hpp"
// sizeof(void*) == 8 detects a 64-bit build; presumably mmap is avoided
// on 32-bit builds because of the limited address space -- confirm
if (sizeof(void*) == 8)
return mmap_disk_io_constructor(ios, sett, cnt);
else
return posix_disk_io_constructor(ios, sett, cnt);
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#else
return posix_disk_io_constructor(ios, sett, cnt);
#endif
}
}
| ||
relevance 0 | ../src/create_torrent.cpp:611 | this can be optimized |
this can be optimized../src/create_torrent.cpp:611 std::string& attr = e["attr"].string();
if (flags & file_storage::flag_pad_file) attr += 'p';
if (flags & file_storage::flag_hidden) attr += 'h';
if (flags & file_storage::flag_executable) attr += 'x';
if (include_symlinks && (flags & file_storage::flag_symlink)) attr += 'l';
}
void add_symlink_path(entry& e, std::string symlink_path)
{
entry& sympath_e = e["symlink path"];
std::string const link = lexically_relative("", symlink_path);
for (auto elems = lsplit_path(link); !elems.first.empty();
elems = lsplit_path(elems.second))
sympath_e.list().emplace_back(elems.first);
}
}
std::vector<char> create_torrent::generate_buf() const
{
std::vector<char> ret;
bencode(std::back_inserter(ret), generate());
return ret;
}
entry create_torrent::generate() const
{
if (m_files.num_files() == 0 || m_files.total_size() == 0)
aux::throw_ex<system_error>(errors::torrent_missing_file_tree);
// if all v2 hashes are set correctly, generate the v2 parts of the
// torrent
bool const make_v2 = validate_v2_hashes(m_files, m_file_piece_hash);
bool const make_v1 = validate_v1_hashes(m_files, m_piece_hash);
// if neither v1 nor v2 hashes were set, we can't create a torrent
if (!make_v1 && !make_v2)
aux::throw_ex<system_error>(errors::invalid_hash_entry);
TORRENT_ASSERT(m_files.piece_length() > 0);
entry dict;
if (!m_urls.empty()) dict["announce"] = m_urls.front().first;
if (!m_nodes.empty())
{
entry& nodes = dict["nodes"];
entry::list_type& nodes_list = nodes.list();
for (auto const& n : m_nodes)
{
| ||
relevance 0 | ../src/add_torrent_params.cpp:78 | pre C++17, GCC and MSVC do not make std::string nothrow move assignable, which means no type containing a string will be nothrow move assignable by default either static_assert(std::is_nothrow_move_assignable::value , "should be nothrow move assignable"); |
pre C++17, GCC and MSVC do not make std::string nothrow move
assignable, which means no type containing a string will be nothrow move
assignable by default either
static_assert(std::is_nothrow_move_assignable::value
, "should be nothrow move assignable");../src/add_torrent_params.cpp:78 DECL_FLAG(sequential_download);
DECL_FLAG(pinned);
DECL_FLAG(stop_when_ready);
DECL_FLAG(override_trackers);
DECL_FLAG(override_web_seeds);
DECL_FLAG(need_save_resume);
DECL_FLAG(override_resume_data);
DECL_FLAG(merge_resume_trackers);
DECL_FLAG(use_resume_save_path);
DECL_FLAG(merge_resume_http_seeds);
DECL_FLAG(default_flags);
#undef DECL_FLAG
#endif // TORRENT_ABI_VERSION
static_assert(std::is_nothrow_move_constructible<add_torrent_params>::value
, "should be nothrow move constructible");
static_assert(std::is_nothrow_move_constructible<std::string>::value
, "should be nothrow move constructible");
| ||
relevance 0 | ../src/add_torrent_params.cpp:84 | it would be nice if this was nothrow default constructible static_assert(std::is_nothrow_default_constructible::value , "should be nothrow default constructible"); |
it would be nice if this was nothrow default constructible
static_assert(std::is_nothrow_default_constructible::value
, "should be nothrow default constructible");../src/add_torrent_params.cpp:84 DECL_FLAG(pinned);
DECL_FLAG(stop_when_ready);
DECL_FLAG(override_trackers);
DECL_FLAG(override_web_seeds);
DECL_FLAG(need_save_resume);
DECL_FLAG(override_resume_data);
DECL_FLAG(merge_resume_trackers);
DECL_FLAG(use_resume_save_path);
DECL_FLAG(merge_resume_http_seeds);
DECL_FLAG(default_flags);
#undef DECL_FLAG
#endif // TORRENT_ABI_VERSION
static_assert(std::is_nothrow_move_constructible<add_torrent_params>::value
, "should be nothrow move constructible");
static_assert(std::is_nothrow_move_constructible<std::string>::value
, "should be nothrow move constructible");
namespace aux {
// returns whether this add_torrent_params object has "resume-data", i.e.
// information about which pieces we have.
bool contains_resume_data(add_torrent_params const& atp)
{
return !atp.have_pieces.empty()
|| (atp.flags & torrent_flags::seed_mode);
}
}
}
| ||
relevance 0 | ../src/torrent_peer.cpp:181 | how do we deal with our external address changing? |
how do we deal with our external address changing?../src/torrent_peer.cpp:181 // connections. If it fails, we'll
// retry with encryption
, pe_support(false)
#endif
, is_v6_addr(false)
#if TORRENT_USE_I2P
, is_i2p_addr(false)
#endif
, on_parole(false)
, banned(false)
, supports_utp(true) // assume peers support utp
, confirmed_supports_utp(false)
, supports_holepunch(false)
, web_seed(false)
, protocol_v2(false)
{}
// Lazily compute and cache this peer's priority, derived from our
// external endpoint and the peer's endpoint. A cached value of 0 acts
// as the "not yet computed" sentinel. NOTE(review): the cache is never
// invalidated, so it goes stale if our external address later changes
// (see the TODO for this function).
std::uint32_t torrent_peer::rank(external_ip const& external, int external_port) const
{
TORRENT_ASSERT(in_use);
if (peer_rank == 0)
peer_rank = peer_priority(
tcp::endpoint(external.external_address(this->address()), std::uint16_t(external_port))
, tcp::endpoint(this->address(), this->port));
return peer_rank;
}
#ifndef TORRENT_DISABLE_LOGGING
// Render this peer's address as a string (used for logging). i2p peers
// are identified by their destination rather than an IP address.
std::string torrent_peer::to_string() const
{
TORRENT_ASSERT(in_use);
#if TORRENT_USE_I2P
if (is_i2p_addr) return dest().to_string();
#endif // TORRENT_USE_I2P
return address().to_string();
}
#endif
// Total payload bytes downloaded from this peer. While a connection is
// live, its statistics object is authoritative; once disconnected, the
// total is kept in prev_amount_download in units of 1 kiB (hence the
// << 10 when converting back to bytes).
std::int64_t torrent_peer::total_download() const
{
TORRENT_ASSERT(in_use);
if (connection == nullptr)
return std::int64_t(prev_amount_download) << 10;
// while connected, the disconnected-counter must be zero
TORRENT_ASSERT(prev_amount_download == 0);
return connection->statistics().total_payload_download();
}
| ||
relevance 0 | ../src/alert.cpp:404 | move this field into tracker_alert |
move this field into tracker_alert../src/alert.cpp:404
return torrent_alert::message() + ": state changed to: "
+ state_str[state];
#endif
}
tracker_error_alert::tracker_error_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep, int times
, protocol_version v, string_view u, operation_t const operation
, error_code const& e
, string_view m)
: tracker_alert(alloc, h, ep, u)
, times_in_row(times)
, error(e)
, op(operation)
, m_msg_idx(alloc.copy_string(m))
#if TORRENT_ABI_VERSION == 1
, status_code(e && e.category() == http_category() ? e.value() : -1)
, msg(m)
#endif
, version(v)
{
TORRENT_ASSERT(!u.empty());
}
char const* tracker_error_alert::failure_reason() const
{
return m_alloc.get().ptr(m_msg_idx);
}
// Human-readable description of the tracker error: tracker info,
// announce protocol version, error text, detailed failure reason and the
// number of consecutive failures. Returns an empty string when alert
// messages are compiled out.
std::string tracker_error_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
#else
char ret[400];
std::snprintf(ret, sizeof(ret), "%s %s %s \"%s\" (%d)"
, tracker_alert::message().c_str()
, version == protocol_version::V1 ? "v1" : "v2"
, convert_from_native(error.message()).c_str(), error_message()
, times_in_row);
return ret;
#endif
}
tracker_warning_alert::tracker_warning_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep
, string_view u, protocol_version v, string_view m)
: tracker_alert(alloc, h, ep, u)
, m_msg_idx(alloc.copy_string(m))
#if TORRENT_ABI_VERSION == 1
| ||
relevance 0 | ../src/alert.cpp:438 | move this into tracker_alert |
move this into tracker_alert../src/alert.cpp:438 return {};
#else
char ret[400];
std::snprintf(ret, sizeof(ret), "%s %s %s \"%s\" (%d)"
, tracker_alert::message().c_str()
, version == protocol_version::V1 ? "v1" : "v2"
, convert_from_native(error.message()).c_str(), error_message()
, times_in_row);
return ret;
#endif
}
tracker_warning_alert::tracker_warning_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep
, string_view u, protocol_version v, string_view m)
: tracker_alert(alloc, h, ep, u)
, m_msg_idx(alloc.copy_string(m))
#if TORRENT_ABI_VERSION == 1
, msg(m)
#endif
, version(v)
{
TORRENT_ASSERT(!u.empty());
}
char const* tracker_warning_alert::warning_message() const
{
return m_alloc.get().ptr(m_msg_idx);
}
// Compose the warning string: "<tracker info> v1|v2 warning: <text>".
// Returns an empty string when alert messages are compiled out.
std::string tracker_warning_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
#else
std::string ret = tracker_alert::message();
ret += (version == protocol_version::V1) ? " v1" : " v2";
ret += " warning: ";
ret += warning_message();
return ret;
#endif
}
scrape_reply_alert::scrape_reply_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep
, int incomp, int comp, string_view u, protocol_version const v)
: tracker_alert(alloc, h, ep, u)
, incomplete(incomp)
, complete(comp)
| ||
relevance 0 | ../src/alert.cpp:464 | move this into tracker_alert |
move this into tracker_alert../src/alert.cpp:464 char const* tracker_warning_alert::warning_message() const
{
return m_alloc.get().ptr(m_msg_idx);
}
std::string tracker_warning_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
#else
return tracker_alert::message() + (version == protocol_version::V1 ? " v1" : " v2") + " warning: " + warning_message();
#endif
}
scrape_reply_alert::scrape_reply_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep
, int incomp, int comp, string_view u, protocol_version const v)
: tracker_alert(alloc, h, ep, u)
, incomplete(incomp)
, complete(comp)
, version(v)
{
TORRENT_ASSERT(!u.empty());
}
std::string scrape_reply_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
#else
char ret[400];
std::snprintf(ret, sizeof(ret), "%s %s scrape reply: %d %d"
, tracker_alert::message().c_str()
, version == protocol_version::V1 ? "v1" : "v2"
, incomplete, complete);
return ret;
#endif
}
scrape_failed_alert::scrape_failed_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep
, string_view u, protocol_version const v, error_code const& e)
: tracker_alert(alloc, h, ep, u)
, error(e)
, m_msg_idx()
#if TORRENT_ABI_VERSION == 1
, msg(convert_from_native(e.message()))
#endif
| ||
relevance 0 | ../src/alert.cpp:493 | move this into tracker_alert |
move this into tracker_alert../src/alert.cpp:493 return {};
#else
char ret[400];
std::snprintf(ret, sizeof(ret), "%s %s scrape reply: %d %d"
, tracker_alert::message().c_str()
, version == protocol_version::V1 ? "v1" : "v2"
, incomplete, complete);
return ret;
#endif
}
scrape_failed_alert::scrape_failed_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep
, string_view u, protocol_version const v, error_code const& e)
: tracker_alert(alloc, h, ep, u)
, error(e)
, m_msg_idx()
#if TORRENT_ABI_VERSION == 1
, msg(convert_from_native(e.message()))
#endif
, version(v)
{
TORRENT_ASSERT(!u.empty());
}
scrape_failed_alert::scrape_failed_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep
, string_view u, string_view m)
: tracker_alert(alloc, h, ep, u)
, error(errors::tracker_failure)
, m_msg_idx(alloc.copy_string(m))
#if TORRENT_ABI_VERSION == 1
, msg(m)
#endif
{
TORRENT_ASSERT(!u.empty());
}
char const* scrape_failed_alert::error_message() const
{
if (m_msg_idx == aux::allocation_slot()) return "";
else return m_alloc.get().ptr(m_msg_idx);
}
std::string scrape_failed_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
#else
return tracker_alert::message() + " scrape failed: " + error_message();
#endif
| ||
relevance 0 | ../src/alert.cpp:532 | move this field into tracker_alert |
move this field into tracker_alert../src/alert.cpp:532 char const* scrape_failed_alert::error_message() const
{
if (m_msg_idx == aux::allocation_slot()) return "";
else return m_alloc.get().ptr(m_msg_idx);
}
std::string scrape_failed_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
#else
return tracker_alert::message() + " scrape failed: " + error_message();
#endif
}
tracker_reply_alert::tracker_reply_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep
, int np, protocol_version v, string_view u)
: tracker_alert(alloc, h, ep, u)
, num_peers(np)
, version(v)
{
TORRENT_ASSERT(!u.empty());
}
std::string tracker_reply_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
#else
char ret[400];
std::snprintf(ret, sizeof(ret), "%s %s received peers: %d"
, tracker_alert::message().c_str()
, version == protocol_version::V1 ? "v1" : "v2"
, num_peers);
return ret;
#endif
}
dht_reply_alert::dht_reply_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, int np)
: tracker_alert(alloc, h, {}, "")
, num_peers(np)
{}
std::string dht_reply_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
#else
| ||
relevance 0 | ../src/alert.cpp:576 | move this to tracker_alert |
move this to tracker_alert../src/alert.cpp:576 , num_peers(np)
{}
// Human-readable summary of a DHT reply: the tracker-alert prefix plus
// the number of peers received. Empty when alert messages are compiled
// out.
std::string dht_reply_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
#else
char ret[400];
std::snprintf(ret, sizeof(ret), "%s received DHT peers: %d"
, tracker_alert::message().c_str(), num_peers);
return ret;
#endif
}
tracker_announce_alert::tracker_announce_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep, string_view u
, protocol_version const v, event_t const e)
: tracker_alert(alloc, h, ep, u)
, event(e)
, version(v)
{
TORRENT_ASSERT(!u.empty());
}
std::string tracker_announce_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
#else
static const char* const event_str[] = {"none", "completed", "started", "stopped", "paused"};
return tracker_alert::message()
+ (version == protocol_version::V1 ? " v1" : " v2")
+ " sending announce (" + event_str[static_cast<int>(event)] + ")";
#endif
}
hash_failed_alert::hash_failed_alert(
aux::stack_allocator& alloc
, torrent_handle const& h
, piece_index_t index)
: torrent_alert(alloc, h)
, piece_index(index)
{
TORRENT_ASSERT(index >= piece_index_t(0));
}
std::string hash_failed_alert::message() const
{
#ifdef TORRENT_DISABLE_ALERT_MSG
return {};
| ||
relevance 0 | ../src/udp_tracker_connection.cpp:633 | why is this a linked list? |
why is this a linked list?../src/udp_tracker_connection.cpp:633 ipv6_peer_entry e{};
std::memcpy(e.ip.data(), buf.data(), 16);
buf = buf.subspan(16);
e.port = aux::read_uint16(buf);
resp.peers6.push_back(e);
}
}
else
{
resp.peers4.reserve(static_cast<std::size_t>(num_peers));
for (int i = 0; i < num_peers; ++i)
{
ipv4_peer_entry e{};
std::memcpy(e.ip.data(), buf.data(), 4);
buf = buf.subspan(4);
e.port = aux::read_uint16(buf);
resp.peers4.push_back(e);
}
}
std::list<address> ip_list;
std::transform(m_endpoints.begin(), m_endpoints.end(), std::back_inserter(ip_list)
, [](tcp::endpoint const& ep) { return ep.address(); } );
cb->tracker_response(tracker_req(), m_target.address(), ip_list, resp);
close();
return true;
}
bool udp_tracker_connection::on_scrape_response(span<char const> buf)
{
restart_read_timeout();
auto const action = static_cast<action_t>(aux::read_int32(buf));
std::uint32_t const transaction = aux::read_uint32(buf);
if (transaction != m_transaction_id)
{
fail(error_code(errors::invalid_tracker_transaction_id), operation_t::bittorrent);
return false;
}
if (action == action_t::error)
{
fail(error_code(errors::tracker_failure), operation_t::bittorrent
, std::string(buf.data(), static_cast<std::size_t>(buf.size())).c_str());
return true;
}
if (action != action_t::scrape)
{
| ||
relevance 0 | ../src/session_handle.cpp:485 | in C++14, use unique_ptr and move it into the lambda |
in C++14, use unique_ptr and move it into the lambda../src/session_handle.cpp:485 async_add_torrent(add_torrent_params(params));
}
// Post an add-torrent request to the session thread without blocking.
// `params` is consumed (moved from). The save_path must be non-empty.
void session_handle::async_add_torrent(add_torrent_params&& params)
{
TORRENT_ASSERT_PRECOND(!params.save_path.empty());
#if TORRENT_ABI_VERSION < 3
// backwards compatibility: fall back to the deprecated single
// info_hash field when no v1/v2 hash or torrent_info was provided
if (!params.info_hashes.has_v1() && !params.info_hashes.has_v2() && !params.ti)
params.info_hashes.v1 = params.info_hash;
#endif
// the internal torrent object keeps and mutates state in the
// torrent_info object. We can't let that leak back to the client
if (params.ti)
params.ti = std::make_shared<torrent_info>(*params.ti);
// we cannot capture a unique_ptr into a lambda in c++11, so we use a raw
// pointer for now. async_call uses a lambda expression to post the call
// to the main thread
auto* p = new add_torrent_params(std::move(params));
// the scope guard frees p if anything below throws before ownership is
// handed off to the posted call
auto guard = aux::scope_end([p]{ delete p; });
p->save_path = complete(p->save_path);
#if TORRENT_ABI_VERSION == 1
handle_backwards_compatible_resume_data(*p);
#endif
async_call(&session_impl::async_add_torrent, p);
// ownership of p now belongs to the posted call; cancel the cleanup
guard.disarm();
}
#ifndef BOOST_NO_EXCEPTIONS
#if TORRENT_ABI_VERSION == 1
// if the torrent already exists, this will throw duplicate_torrent
torrent_handle session_handle::add_torrent(
torrent_info const& ti
, std::string const& save_path
, entry const& resume_data
, storage_mode_t storage_mode
, bool const add_paused)
{
add_torrent_params p;
p.ti = std::make_shared<torrent_info>(ti);
p.save_path = save_path;
if (resume_data.type() != entry::undefined_t)
{
bencode(std::back_inserter(p.resume_data), resume_data);
}
p.storage_mode = storage_mode;
if (add_paused) p.flags |= add_torrent_params::flag_paused;
| ||
relevance 0 | ../src/http_seed_connection.cpp:441 | technically, this isn't supposed to happen, but it seems to sometimes. Some of the accounting is probably wrong in certain cases |
technically, this isn't supposed to happen, but it seems to
sometimes. Some of the accounting is probably wrong in certain
cases../src/http_seed_connection.cpp:441 // cut out the chunk header from the receive buffer
TORRENT_ASSERT(m_chunk_pos + m_body_start < INT_MAX);
m_recv_buffer.cut(header_size, t->block_size() + 1024, aux::numeric_cast<int>(m_chunk_pos + m_body_start));
recv_buffer = m_recv_buffer.get();
recv_buffer = recv_buffer.subspan(m_body_start);
m_chunk_pos += chunk_size;
if (chunk_size == 0)
{
TORRENT_ASSERT(m_recv_buffer.get().size() < m_chunk_pos + m_body_start + 1
|| m_recv_buffer.get()[static_cast<std::ptrdiff_t>(m_chunk_pos + m_body_start)] == 'H'
|| (m_parser.chunked_encoding()
&& m_recv_buffer.get()[static_cast<std::ptrdiff_t>(m_chunk_pos + m_body_start)] == '\r'));
m_chunk_pos = -1;
}
}
}
int payload = int(bytes_transferred);
if (payload > m_response_left) payload = int(m_response_left);
if (payload > front_request.length) payload = front_request.length;
if (payload > outstanding_bytes()) payload = outstanding_bytes();
received_bytes(payload, 0);
incoming_piece_fragment(payload);
m_response_left -= payload;
if (m_parser.status_code() == 503)
{
if (!m_parser.finished()) return;
int retry_time = std::atoi(std::string(recv_buffer.begin(), recv_buffer.end()).c_str());
if (retry_time <= 0) retry_time = 60;
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "CONNECT", "retrying in %d seconds", retry_time);
#endif
received_bytes(0, int(bytes_transferred));
// temporarily unavailable, retry later
t->retry_web_seed(this, seconds32(retry_time));
disconnect(error_code(m_parser.status_code(), http_category()), operation_t::bittorrent, failure);
return;
}
// we only received the header, no data
if (recv_buffer.empty()) break;
if (recv_buffer.size() < front_request.length) break;
// if the response is chunked, we need to receive the last
// terminating chunk and the tail headers before we can proceed
if (m_parser.chunked_encoding() && m_chunk_pos >= 0) break;
| ||
relevance 0 | ../src/utp_stream.cpp:1467 | this loop is not very efficient. It could be fixed by having a separate list of sequence numbers that need resending |
this loop is not very efficient. It could be fixed by having
a separate list of sequence numbers that need resending../src/utp_stream.cpp:1467#endif
if (m_stalled)
{
if (flags & pkt_ack)
defer_ack();
return false;
}
// m_out_eof means we're trying to close the write side of this socket,
// we need to flush all payload before we can send the FIN packet, so don't
// store any payload in the nagle packet
bool const force = (flags & pkt_ack) || (flags & pkt_fin) || m_out_eof;
// when we want to close the outgoing stream, we need to send the
// remaining nagle packet even though it won't fill a packet.
bool const force_flush_nagle = m_out_eof && m_write_buffer_size;
// first see if we need to resend any packets
for (int i = (m_acked_seq_nr + 1) & ACK_MASK; i != m_seq_nr; i = (i + 1) & ACK_MASK)
{
packet* p = m_outbuf.at(aux::numeric_cast<packet_buffer::index_type>(i));
if (!p) continue;
if (!p->need_resend) continue;
if (!resend_packet(p))
{
// we couldn't resend the packet. It probably doesn't
// fit in our cwnd. If force is set, we need to continue
// to send our packet anyway, if we don't have force set,
// we might as well return
if (!force) return false;
// resend_packet might have failed
if (state() == state_t::error_wait || state() == state_t::deleting) return false;
if (m_stalled) return false;
break;
}
// don't fast-resend this packet
if (m_fast_resend_seq_nr == i)
m_fast_resend_seq_nr = (m_fast_resend_seq_nr + 1) & ACK_MASK;
}
// MTU DISCOVERY
// under these conditions, the next packet we send should be an MTU probe.
// MTU probes get to use the mid-point packet size, whereas other packets
// use a conservative packet size of the largest known to work. The reason
// for the cwnd condition is to make sure the probe is surrounded by non-
// probes, to be able to distinguish a loss of the probe vs. just loss in
// general.
| ||
relevance 0 | ../src/disabled_disk_io.cpp:106 | it would be nice to return a valid hash of zeroes here |
it would be nice to return a valid hash of zeroes here../src/disabled_disk_io.cpp:106 });
}
bool async_write(storage_index_t
, peer_request const& r
, char const*, std::shared_ptr<disk_observer>
, std::function<void(storage_error const&)> handler
, disk_job_flags_t) override
{
TORRENT_ASSERT(r.length <= default_block_size);
TORRENT_UNUSED(r);
post(m_ios, [h = std::move(handler)] { h(storage_error{}); });
return false;
}
void async_hash(storage_index_t
, piece_index_t piece, span<sha256_hash>, disk_job_flags_t
, std::function<void(piece_index_t, sha1_hash const&, storage_error const&)> handler) override
{
post(m_ios, [h = std::move(handler), piece] { h(piece, sha1_hash{}, storage_error{}); });
}
void async_hash2(storage_index_t, piece_index_t piece, int
, disk_job_flags_t
, std::function<void(piece_index_t, sha256_hash const&, storage_error const&)> handler) override
{
post(m_ios, [h = std::move(handler), piece]() { h(piece, sha256_hash{}, storage_error{}); });
}
void async_move_storage(storage_index_t
, std::string p, move_flags_t
, std::function<void(status_t, std::string const&, storage_error const&)> handler) override
{
post(m_ios, [h = std::move(handler), path = std::move(p)] () mutable
{ h(status_t::no_error, std::move(path), storage_error{}); });
}
void async_release_files(storage_index_t, std::function<void()> handler) override
{
post(m_ios, [h = std::move(handler)] { h(); });
}
void async_delete_files(storage_index_t
, remove_flags_t, std::function<void(storage_error const&)> handler) override
{
post(m_ios, [h = std::move(handler)] { h(storage_error{}); });
}
void async_check_files(storage_index_t
, add_torrent_params const*
| ||
relevance 0 | ../src/magnet_uri.cpp:439 | what's the right number here? |
what's the right number here?../src/magnet_uri.cpp:439 }
else if (string_equal_no_case(name, "so"_sv)) // select-only (files)
{
// accept only digits, '-' and ','
if (std::any_of(value.begin(), value.end(), [](char c)
{ return !is_digit(c) && c != '-' && c != ','; }))
continue;
// make sure all file priorities are set to 0, except the ones
// we specify in the file_priorities
p.flags |= torrent_flags::default_dont_download;
do
{
string_view token;
std::tie(token, value) = split_string(value, ',');
if (token.empty()) continue;
int idx1, idx2;
constexpr int max_index = 10000; // can't risk out of memory
auto const divider = token.find_first_of('-');
if (divider != std::string::npos) // it's a range
{
if (divider == 0) // no start index
continue;
if (divider == token.size() - 1) // no end index
continue;
idx1 = std::atoi(token.substr(0, divider).to_string().c_str());
if (idx1 < 0 || idx1 > max_index) // invalid index
continue;
idx2 = std::atoi(token.substr(divider + 1).to_string().c_str());
if (idx2 < 0 || idx2 > max_index) // invalid index
continue;
if (idx1 > idx2) // wrong range limits
continue;
}
else // it's an index
{
idx1 = std::atoi(token.to_string().c_str());
if (idx1 < 0 || idx1 > max_index) // invalid index
continue;
idx2 = idx1;
}
if (int(p.file_priorities.size()) <= idx2)
p.file_priorities.resize(static_cast<std::size_t>(idx2) + 1, dont_download);
| ||
relevance 0 | ../src/choker.cpp:255 | make configurable |
make configurable../src/choker.cpp:255 // first reset the number of unchoke slots, because we'll calculate
// it purely based on the current state of our peers.
upload_slots = 0;
int rate_threshold = sett.get_int(settings_pack::rate_choker_initial_threshold);
std::sort(peers.begin(), peers.end()
, [](peer_connection const* lhs, peer_connection const* rhs)
{ return upload_rate_compare(lhs, rhs); });
for (auto const* p : peers)
{
int const rate = int(p->uploaded_in_last_round()
* 1000 / total_milliseconds(unchoke_interval));
// always have at least 1 unchoke slot
if (rate < rate_threshold) break;
++upload_slots;
rate_threshold += 2048;
}
++upload_slots;
}
// sorts the peers that are eligible for unchoke by download rate and
// secondary by total upload. The reason for this is, if all torrents are
// being seeded, the download rate will be 0, and the peers we have sent
// the least to should be unchoked
// we use partial sort here, because we only care about the top
// upload_slots peers.
int const slots = std::min(upload_slots, int(peers.size()));
if (sett.get_int(settings_pack::seed_choking_algorithm)
== settings_pack::round_robin)
{
int const pieces = sett.get_int(settings_pack::seeding_piece_quota);
std::nth_element(peers.begin(), peers.begin()
+ slots, peers.end()
, [pieces](peer_connection const* lhs, peer_connection const* rhs)
{ return unchoke_compare_rr(lhs, rhs, pieces); });
}
else if (sett.get_int(settings_pack::seed_choking_algorithm)
== settings_pack::fastest_upload)
{
std::nth_element(peers.begin(), peers.begin()
+ slots, peers.end()
, [](peer_connection const* lhs, peer_connection const* rhs)
| ||
relevance 0 | ../src/posix_part_file.cpp:337 | what do we do if someone is currently reading from the disk from this piece? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal |
what do we do if someone is currently reading from the disk
from this piece? does it matter? Since we won't actively erase the
data from disk, but it may be overwritten soon, it's probably not that
big of a deal../src/posix_part_file.cpp:337
if (ec) return {};
#ifdef TORRENT_WINDOWS
ret = file_pointer(::_wfopen(convert_to_native_path_string(fn).c_str(), L"wb+"));
#else
ret = file_pointer(::fopen(fn.c_str(), "wb+"));
#endif
if (ret.file() == nullptr)
ec.assign(errno, generic_category());
}
if (ec) return {};
return ret;
}
// Release the slot occupied by `piece` back to the free-list and drop
// the piece from the map. A piece that is not present in the part file
// is silently ignored. Marks the metadata dirty so the header gets
// rewritten on the next flush.
void posix_part_file::free_piece(piece_index_t const piece)
{
	auto const slot_it = m_piece_map.find(piece);
	if (slot_it == m_piece_map.end()) return;
	m_free_slots.push_back(slot_it->second);
	m_piece_map.erase(slot_it);
	m_dirty_metadata = true;
}
// Move the part file from its current directory (m_path) into `path`.
// The metadata header is flushed first so the on-disk file is current
// before it moves. On success (or when there was nothing to move)
// m_path is updated to the new directory. Errors are reported via `ec`.
void posix_part_file::move_partfile(std::string const& path, error_code& ec)
{
// make sure the on-disk header matches the in-memory state before moving
flush_metadata_impl(ec);
if (ec) return;
// only attempt the move if we actually store any pieces; otherwise the
// file may not even exist
if (!m_piece_map.empty())
{
std::string old_path = combine_path(m_path, m_name);
std::string new_path = combine_path(path, m_name);
rename(old_path, new_path, ec);
// a missing source file is not an error; there is simply nothing to move
if (ec == boost::system::errc::no_such_file_or_directory)
ec.clear();
if (ec)
{
// rename failed (e.g. the destination is on a different volume);
// fall back to copy + remove
storage_error se;
aux::copy_file(old_path, new_path, se);
ec = se.ec;
if (ec) return;
remove(old_path, ec);
}
}
// record the new directory regardless of whether a file was moved
m_path = path;
}
| ||
relevance 0 | ../src/posix_part_file.cpp:425 | instead of rebuilding the whole file header and flushing it, update the slot entries as we go |
instead of rebuilding the whole file header
and flushing it, update the slot entries as we go../src/posix_part_file.cpp:425 auto bytes_read = std::fread(buf.get(), 1, std::size_t(block_to_copy), file.file());
if (int(bytes_read) != block_to_copy)
ec.assign(errno, generic_category());
TORRENT_ASSERT(!ec);
if (ec) return;
f(file_offset, {buf.get(), block_to_copy});
}
file_offset += block_to_copy;
piece_offset = 0;
size -= block_to_copy;
}
}
// Public entry point for flushing the part file's header/metadata to
// disk. Forwards to flush_metadata_impl(), which is a no-op unless the
// metadata is dirty.
void posix_part_file::flush_metadata(error_code& ec)
{
flush_metadata_impl(ec);
}
void posix_part_file::flush_metadata_impl(error_code& ec)
{
// do we need to flush the metadata?
if (m_dirty_metadata == false) return;
if (m_piece_map.empty())
{
// if we don't have any pieces left in the
// part file, remove it
std::string const p = combine_path(m_path, m_name);
remove(p, ec);
if (ec == boost::system::errc::no_such_file_or_directory)
ec.clear();
return;
}
auto f = open_file(open_mode::read_write, ec);
if (ec) return;
std::vector<char> header(static_cast<std::size_t>(m_header_size));
using namespace libtorrent::aux;
char* ptr = header.data();
write_uint32(m_max_pieces, ptr);
write_uint32(m_piece_size, ptr);
for (piece_index_t piece(0); piece < piece_index_t(m_max_pieces); ++piece)
{
auto const i = m_piece_map.find(piece);
| ||
relevance 0 | ../src/udp_socket.cpp:659 | perhaps an attempt should be made to bind m_socks5_sock to the device of m_listen_socket |
perhaps an attempt should be made to bind m_socks5_sock to the
device of m_listen_socket../src/udp_socket.cpp:659 if (ec)
{
if (m_alerts.should_post<socks5_alert>())
m_alerts.emplace_alert<socks5_alert>(m_proxy_addr, operation_t::sock_option, ec);
ec.clear();
}
#endif
#endif
tcp::endpoint const bind_ep(m_listen_socket.get_local_endpoint().address(), 0);
m_socks5_sock.bind(bind_ep, ec);
if (ec)
{
if (m_alerts.should_post<socks5_alert>())
m_alerts.emplace_alert<socks5_alert>(m_proxy_addr, operation_t::sock_bind, ec);
++m_failures;
retry_connection();
return;
}
ADD_OUTSTANDING_ASYNC("socks5::on_connected");
m_socks5_sock.async_connect(m_proxy_addr
, std::bind(&socks5::on_connected, self(), _1));
ADD_OUTSTANDING_ASYNC("socks5::on_connect_timeout");
m_timer.expires_after(seconds(10));
m_timer.async_wait(std::bind(&socks5::on_connect_timeout
, self(), _1));
}
// Invoked when the SOCKS5 connect timer fires before on_connected()
// completed. Posts an alert, tears down the half-open socket and
// schedules another connection attempt.
void socks5::on_connect_timeout(error_code const& e)
{
	COMPLETE_ASYNC("socks5::on_connect_timeout");
	// the timer was cancelled, or we are shutting down; nothing to do
	if (e == boost::asio::error::operation_aborted || m_abort) return;
	if (m_alerts.should_post<socks5_alert>())
	{
		m_alerts.emplace_alert<socks5_alert>(m_proxy_addr
			, operation_t::connect, errors::timed_out);
	}
	// best-effort close; we don't care about close errors here
	error_code discard;
	m_socks5_sock.close(discard);
	++m_failures;
	retry_connection();
}
void socks5::on_connected(error_code const& e)
{
| ||
relevance 0 | ../src/cpuid.cpp:131 | enable when aarch64 is really tested |
enable when aarch64 is really tested../src/cpuid.cpp:131 bool supports_mmx() noexcept
{
#if TORRENT_HAS_SSE
// query CPUID leaf 1 for the CPU feature flag registers
std::uint32_t cpui[4] = {0};
cpuid(cpui, 1);
// NOTE(review): this tests bit 23 of cpui[2] (ECX), which per the Intel
// SDM is the POPCNT feature flag; the MMX flag is bit 23 of EDX
// (cpui[3]). The function name appears misleading -- presumably callers
// actually gate popcnt-style code on this. Confirm against the call
// sites before changing the register/bit tested here.
return (cpui[2] & (1 << 23)) != 0;
#else
// no x86 CPUID available on this target
return false;
#endif
}
// Runtime detection of ARM NEON (Advanced SIMD) via the ELF auxiliary
// vector. The AT_HWCAP constants and bit positions are hard-coded (see
// the commented-out getauxval() equivalents) to avoid depending on the
// hwcap headers.
bool supports_arm_neon() noexcept
{
#if TORRENT_HAS_ARM_NEON && TORRENT_HAS_AUXV
#if defined __arm__
//return (getauxval(AT_HWCAP) & HWCAP_NEON);
// 16 = AT_HWCAP, bit 12 = HWCAP_NEON
return (helper_getauxval(16) & (1 << 12));
#elif defined __aarch64__
//return (getauxval(AT_HWCAP) & HWCAP_ASIMD);
//return (getauxval(16) & (1 << 1));
// deliberately disabled until aarch64 detection is tested (see TODO)
return false;
#endif
#else
return false;
#endif
}
// Runtime detection of the ARM CRC32 instruction extension via the ELF
// auxiliary vector. TORRENT_FORCE_ARM_CRC32 overrides detection
// entirely. Constants are hard-coded to avoid hwcap header
// dependencies; see the commented-out getauxval() equivalents.
bool supports_arm_crc32c() noexcept
{
#if TORRENT_HAS_ARM_CRC32 && TORRENT_HAS_AUXV
#if defined TORRENT_FORCE_ARM_CRC32
return true;
#elif defined __arm__
//return (getauxval(AT_HWCAP2) & HWCAP2_CRC32);
// 26 = AT_HWCAP2, bit 4 = HWCAP2_CRC32
return (helper_getauxval(26) & (1 << 4));
#elif defined __aarch64__
//return (getauxval(AT_HWCAP) & HWCAP_CRC32);
// 16 = AT_HWCAP, bit 7 = HWCAP_CRC32
return (helper_getauxval(16) & (1 << 7));
#endif
#else
return false;
#endif
}
} // anonymous namespace
bool const sse42_support = supports_sse42();
bool const mmx_support = supports_mmx();
bool const arm_neon_support = supports_arm_neon();
bool const arm_crc32c_support = supports_arm_crc32c();
} }
| ||
relevance 0 | ../src/storage_utils.cpp:230 | ideally, if we end up copying files because of a move across volumes, the source should not be deleted until they've all been copied. That would let us rollback with higher confidence. |
ideally, if we end up copying files because of a move across
volumes, the source should not be deleted until they've all been
copied. That would let us rollback with higher confidence.../src/storage_utils.cpp:230 // later
aux::vector<bool, file_index_t> copied_files(std::size_t(f.num_files()), false);
// track how far we got in case of an error
file_index_t file_index{};
for (auto const i : f.file_range())
{
// files moved out to absolute paths are not moved
if (f.file_absolute_path(i)) continue;
std::string const old_path = combine_path(save_path, f.file_path(i));
std::string const new_path = combine_path(new_save_path, f.file_path(i));
error_code ignore;
if (flags == move_flags_t::dont_replace && exists(new_path, ignore))
{
if (ret == status_t::no_error) ret = status_t::need_full_check;
continue;
}
move_file(old_path, new_path, ec);
// if the source file doesn't exist. That's not a problem
// we just ignore that file
if (ec.ec == boost::system::errc::no_such_file_or_directory)
ec.ec.clear();
else if (ec
&& ec.ec != boost::system::errc::invalid_argument
&& ec.ec != boost::system::errc::permission_denied)
{
// moving the file failed
// on OSX, the error when trying to rename a file across different
// volumes is EXDEV, which will make it fall back to copying.
ec.ec.clear();
copy_file(old_path, new_path, ec);
if (!ec) copied_files[i] = true;
}
if (ec)
{
ec.file(i);
file_index = i;
break;
}
}
if (!ec && move_partfile)
{
error_code e;
move_partfile(new_save_path, e);
if (e)
| ||
relevance 0 | ../src/storage_utils.cpp:538 | it would seem reasonable to, instead, set the have_pieces bits for the pieces representing these files, and resume with the normal logic |
it would seem reasonable to, instead, set the have_pieces bits
for the pieces representing these files, and resume with the normal
logic../src/storage_utils.cpp:538 continue;
std::int64_t const size = get_filesize(stat, file_index, fs
, save_path, ec);
if (size < 0) return false;
if (size < fs.file_size(file_index))
{
ec.ec = errors::mismatching_file_size;
ec.file(file_index);
ec.operation = operation_t::check_resume;
return false;
}
}
return true;
}
#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
// always trigger a full recheck when we pull in files from other
// torrents, via hard links
if (added_files) return false;
#endif
// parse have bitmask. Verify that the files we expect to have
// actually do exist
piece_index_t const end_piece = std::min(rd.have_pieces.end_index(), fs.end_piece());
for (piece_index_t i(0); i < end_piece; ++i)
{
if (rd.have_pieces.get_bit(i) == false) continue;
std::vector<file_slice> f = fs.map_block(i, 0, 1);
TORRENT_ASSERT(!f.empty());
file_index_t const file_index = f[0].file_index;
// files with priority zero may not have been saved to disk at their
// expected location, but is likely to be in a partfile. Just exempt it
// from checking
if (file_index < file_priority.end_index()
&& file_priority[file_index] == dont_download)
continue;
if (fs.pad_file_at(file_index)) continue;
if (get_filesize(stat, file_index, fs, save_path, ec) < 0)
return false;
// OK, this file existed, good. Now, skip all remaining pieces in
// this file. We're just sanity-checking whether the files exist
// or not.
peer_request const pr = fs.map_file(file_index
| ||
relevance 0 | ../src/torrent_handle.cpp:589 | support moving files into this call |
support moving files into this call../src/torrent_handle.cpp:589 auto retp = &prio;
sync_call(&torrent::piece_priorities, retp);
std::vector<int> ret;
ret.reserve(prio.size());
for (auto p : prio)
ret.push_back(int(static_cast<std::uint8_t>(p)));
return ret;
}
#endif
// Asynchronously set the download priority of a single file. The call
// is posted to the session thread and returns immediately.
void torrent_handle::file_priority(file_index_t index, download_priority_t priority) const
{
async_call(&torrent::set_file_priority, index, priority);
}
// Synchronously fetch the download priority of a single file.
// dont_download is the fallback value supplied to sync_call_ret
// (presumably returned when the underlying torrent is gone -- confirm
// in sync_call_ret).
download_priority_t torrent_handle::file_priority(file_index_t index) const
{
return sync_call_ret<download_priority_t>(dont_download, &torrent::file_priority, index);
}
// Asynchronously set the priorities of all files at once. `files` is
// indexed by file, in file_index_t order; the cast only reinterprets
// the std::vector as the strongly-indexed aux::vector.
void torrent_handle::prioritize_files(std::vector<download_priority_t> const& files) const
{
async_call(&torrent::prioritize_files
, static_cast<aux::vector<download_priority_t, file_index_t> const&>(files));
}
// Synchronously fetch the per-file download priorities. Blocks until
// the session thread has filled in the vector.
std::vector<download_priority_t> torrent_handle::get_file_priorities() const
{
aux::vector<download_priority_t, file_index_t> ret;
// pass a pointer so torrent::file_priorities can fill our local vector
auto retp = &ret;
sync_call(&torrent::file_priorities, retp);
return TORRENT_RVO(ret);
}
#if TORRENT_ABI_VERSION == 1
// ============ start deprecation ===============
void torrent_handle::prioritize_files(std::vector<int> const& files) const
{
aux::vector<download_priority_t, file_index_t> file_prio;
file_prio.reserve(files.size());
for (auto const p : files) {
file_prio.push_back(download_priority_t(static_cast<std::uint8_t>(p)));
}
async_call(&torrent::prioritize_files, file_prio);
}
std::vector<int> torrent_handle::file_priorities() const
{
aux::vector<download_priority_t, file_index_t> prio;
| ||
relevance 0 | ../src/piece_picker.cpp:121 | find a better place for this |
find a better place for this../src/piece_picker.cpp:121 if (limit == 0)
{
std::cerr << " ...";
break;
}
if (*i == -1) break;
while (j != p.m_priority_boundaries.end() && *j <= index)
{
std::cerr << "| ";
++j;
}
std::cerr << *i << "(" << p.m_piece_map[*i].index << ") ";
--limit;
}
std::cerr << std::endl;
}
}
#endif // TORRENT_PICKER_LOG
namespace libtorrent {
const piece_block piece_block::invalid(
std::numeric_limits<piece_index_t>::max()
, std::numeric_limits<int>::max());
constexpr prio_index_t piece_picker::piece_pos::we_have_index;
constexpr picker_options_t piece_picker::rarest_first;
constexpr picker_options_t piece_picker::reverse;
constexpr picker_options_t piece_picker::on_parole;
constexpr picker_options_t piece_picker::prioritize_partials;
constexpr picker_options_t piece_picker::sequential;
constexpr picker_options_t piece_picker::align_expanded_pieces;
constexpr picker_options_t piece_picker::piece_extent_affinity;
constexpr download_queue_t piece_picker::piece_pos::piece_downloading;
constexpr download_queue_t piece_picker::piece_pos::piece_full;
constexpr download_queue_t piece_picker::piece_pos::piece_finished;
constexpr download_queue_t piece_picker::piece_pos::piece_zero_prio;
constexpr download_queue_t piece_picker::piece_pos::num_download_categories;
constexpr download_queue_t piece_picker::piece_pos::piece_open;
constexpr download_queue_t piece_picker::piece_pos::piece_downloading_reverse;
constexpr download_queue_t piece_picker::piece_pos::piece_full_reverse;
// the max number of blocks to create an affinity for
constexpr int max_piece_affinity_extent = 4 * 1024 * 1024 / default_block_size;
piece_picker::piece_picker(std::int64_t const total_size, int const piece_size)
: m_priority_boundaries(1, m_pieces.end_index())
{
TORRENT_ASSERT(total_size > 0);
TORRENT_ASSERT(piece_size > 0);
| ||
relevance 0 | ../src/piece_picker.cpp:2074 | this could probably be optimized by incrementally calling partial_sort to sort one more element in the list. Because chances are that we'll just need a single piece, and once we've picked from it we're done. Sorting the rest of the list in that case is a waste of time. |
this could probably be optimized by incrementally
calling partial_sort to sort one more element in the list. Because
chances are that we'll just need a single piece, and once we've
picked from it we're done. Sorting the rest of the list in that
case is a waste of time.../src/piece_picker.cpp:2074 // now, copy over the pointers. We also apply a filter here to not
// include ineligible pieces in certain modes. For instance, a piece
// that the current peer doesn't have is not included.
for (auto& dp : m_downloads[piece_pos::piece_downloading])
{
pc.inc_stats_counter(counters::piece_picker_partial_loops);
if (!is_piece_free(dp.index, pieces)) continue;
TORRENT_ASSERT(m_piece_map[dp.index].download_queue()
== piece_pos::piece_downloading);
ordered_partials[num_ordered_partials++] = &dp;
}
// now, sort the list.
if (options & rarest_first)
{
ret |= picker_log_alert::rarest_first_partials;
std::sort(ordered_partials.begin(), ordered_partials.begin() + num_ordered_partials
, std::bind(&piece_picker::partial_compare_rarest_first, this
, _1, _2));
}
for (int i = 0; i < num_ordered_partials; ++i)
{
ret |= picker_log_alert::prioritize_partials;
num_blocks = add_blocks_downloading(*ordered_partials[i], pieces
, interesting_blocks, backup_blocks, backup_blocks2
, num_blocks, prefer_contiguous_blocks, peer, options);
if (num_blocks <= 0) return ret;
if (int(backup_blocks.size()) >= num_blocks
&& int(backup_blocks2.size()) >= num_blocks)
break;
}
num_blocks = append_blocks(interesting_blocks, backup_blocks
, num_blocks);
if (num_blocks <= 0) return ret;
num_blocks = append_blocks(interesting_blocks, backup_blocks2
, num_blocks);
if (num_blocks <= 0) return ret;
}
if (!suggested_pieces.empty())
{
for (piece_index_t i : suggested_pieces)
{
| ||
relevance 0 | ../src/piece_picker.cpp:2218 | Is it a good idea that this affinity takes precedence over piece priority? |
Is it a good idea that this affinity takes precedence over
piece priority?../src/piece_picker.cpp:2218 prio_index_t const end = priority_end(i);
for (prio_index_t p = prev(end); p >= start; --p)
{
pc.inc_stats_counter(counters::piece_picker_reverse_rare_loops);
if (!is_piece_free(m_pieces[p], pieces)) continue;
ret |= picker_log_alert::reverse_rarest_first;
num_blocks = add_blocks(m_pieces[p], pieces
, interesting_blocks, backup_blocks
, backup_blocks2, num_blocks
, prefer_contiguous_blocks, peer, ignored_pieces
, options);
if (num_blocks <= 0) return ret;
}
}
}
else
{
if (options & piece_extent_affinity)
{
int to_erase = -1;
int idx = -1;
for (piece_extent_t const e : m_recent_extents)
{
++idx;
bool have_all = true;
for (piece_index_t const p : extent_for(e))
{
if (!m_piece_map[p].have()) have_all = false;
if (!is_piece_free(p, pieces)) continue;
ret |= picker_log_alert::extent_affinity;
num_blocks = add_blocks(p, pieces
, interesting_blocks, backup_blocks
, backup_blocks2, num_blocks
, prefer_contiguous_blocks, peer, ignored_pieces
, options);
if (num_blocks <= 0)
{
// if we have all pieces belonging to this extent, remove it
if (to_erase != -1) m_recent_extents.erase(m_recent_extents.begin() + to_erase);
return ret;
}
}
// if we have all pieces belonging to this extent, remove it
if (have_all) to_erase = idx;
}
if (to_erase != -1) m_recent_extents.erase(m_recent_extents.begin() + to_erase);
| ||
relevance 0 | ../src/piece_picker.cpp:2572 | when expanding pieces for cache stripe reasons, the !downloading condition doesn't make much sense |
when expanding pieces for cache stripe reasons,
the !downloading condition doesn't make much sense../src/piece_picker.cpp:2572 TORRENT_ASSERT(index < m_piece_map.end_index());
if (next(index) == m_piece_map.end_index())
return m_blocks_in_last_piece;
else
return blocks_per_piece();
}
// A piece is "free" when the peer has it (bit set in `bitmask`) and we
// neither already have it nor filtered it out (priority zero).
bool piece_picker::is_piece_free(piece_index_t const piece
	, typed_bitfield<piece_index_t> const& bitmask) const
{
	if (!bitmask[piece]) return false;
	auto const& pos = m_piece_map[piece];
	return !pos.have() && !pos.filtered();
}
// Like is_piece_free(), but additionally a piece that is already being
// downloaded cannot be picked again.
bool piece_picker::can_pick(piece_index_t const piece
	, typed_bitfield<piece_index_t> const& bitmask) const
{
	if (!bitmask[piece]) return false;
	auto const& pos = m_piece_map[piece];
	return !pos.have() && !pos.downloading() && !pos.filtered();
}
#if TORRENT_USE_INVARIANT_CHECKS
// Invariant check (debug builds only): every per-block peer pointer
// must either be null or point at a torrent_peer still marked in_use.
void piece_picker::check_peers()
{
for (auto const& b : m_block_info)
{
TORRENT_ASSERT(b.peer == nullptr || static_cast<torrent_peer*>(b.peer)->in_use);
}
}
#endif
// Null out every per-block reference to `peer`, so the picker never
// dereferences a stale pointer after the peer goes away.
void piece_picker::clear_peer(torrent_peer* peer)
{
	for (auto& info : m_block_info)
	{
		if (info.peer == peer) info.peer = nullptr;
	}
}
// the first bool is true if this is the only peer that has requested and downloaded
// blocks from this piece.
// the second bool is true if this is the only active peer that is requesting
// and downloading blocks from this piece. Active means having a connection.
| ||
relevance 0 | ../src/piece_picker.cpp:3150 | should 5 be configurable? |
should 5 be configurable?../src/piece_picker.cpp:3150 bool have_all = true;
for (auto const piece : extent_for(this_extent))
{
if (piece == p) continue;
if (!m_piece_map[piece].have()) have_all = false;
// if at least one piece in this extent has a different priority than
// the one we just started downloading, don't create an affinity for
// adjacent pieces. This probably means the pieces belong to different
// files, or that some other mechanism determining the priority should
// take precedence.
if (piece_priority(piece) != this_prio) return;
}
// if we already have all the *other* pieces in this extent, there's no
// need to inflate their priorities
if (have_all) return;
if (m_recent_extents.size() < 5)
m_recent_extents.push_back(this_extent);
// limit the number of extent affinities active at any given time to limit
// the cost of checking them. Also, don't replace them, commit to
// finishing them before starting another extent. This is analogous to
// limiting the number of partial pieces.
}
// options may be 0 or piece_picker::reverse
// returns false if the block could not be marked as downloading
bool piece_picker::mark_as_downloading(piece_block const block
, torrent_peer* peer, picker_options_t const options)
{
#ifdef TORRENT_PICKER_LOG
std::cerr << "[" << this << "] " << "mark_as_downloading( {"
<< block.piece_index << ", " << block.block_index << "} )" << std::endl;
#endif
TORRENT_ASSERT(peer == nullptr || peer->in_use);
TORRENT_ASSERT(block.block_index != piece_block::invalid.block_index);
TORRENT_ASSERT(block.piece_index != piece_block::invalid.piece_index);
TORRENT_ASSERT(block.piece_index < m_piece_map.end_index());
TORRENT_ASSERT(block.block_index < blocks_in_piece(block.piece_index));
TORRENT_ASSERT(!m_piece_map[block.piece_index].have());
piece_pos& p = m_piece_map[block.piece_index];
if (p.download_queue() == piece_pos::piece_open)
{
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
INVARIANT_CHECK;
| ||
relevance 0 | ../src/session_impl.cpp:615 | come up with some abstraction to do this for gnutls as well load certificates from the windows system certificate store |
come up with some abstraction to do this for gnutls as well
load certificates from the windows system certificate store../src/session_impl.cpp:615 pause();
}
#endif
// This function is called by the creating thread, not in the message loop's
// io_context thread.
void session_impl::start_session()
{
#ifndef TORRENT_DISABLE_LOGGING
session_log("start session");
#endif
#if TORRENT_USE_SSL
error_code ec;
m_ssl_ctx.set_default_verify_paths(ec);
#ifndef TORRENT_DISABLE_LOGGING
if (ec) session_log("SSL set_default verify_paths failed: %s", ec.message().c_str());
ec.clear();
#endif
#if defined TORRENT_WINDOWS && defined TORRENT_USE_OPENSSL && !defined TORRENT_WINRT
X509_STORE* store = X509_STORE_new();
if (store)
{
HCERTSTORE system_store = CertOpenSystemStoreA(0, "ROOT");
// this is best effort
if (system_store)
{
CERT_CONTEXT const* ctx = nullptr;
while ((ctx = CertEnumCertificatesInStore(system_store, ctx)) != nullptr)
{
unsigned char const* cert_ptr = reinterpret_cast<unsigned char const*>(ctx->pbCertEncoded);
X509* x509 = d2i_X509(nullptr, &cert_ptr, ctx->cbCertEncoded);
// this is best effort
if (!x509) continue;
X509_STORE_add_cert(store, x509);
X509_free(x509);
}
CertFreeCertificateContext(ctx);
CertCloseStore(system_store, 0);
}
}
SSL_CTX* ssl_ctx = m_ssl_ctx.native_handle();
SSL_CTX_set_cert_store(ssl_ctx, store);
#endif
#ifdef __APPLE__
m_ssl_ctx.load_verify_file("/etc/ssl/cert.pem", ec);
#ifndef TORRENT_DISABLE_LOGGING
if (ec) session_log("SSL load_verify_file failed: %s", ec.message().c_str());
ec.clear();
#endif
| ||
relevance 0 | ../src/session_impl.cpp:1482 | it would be nice to reserve() these vectors up front |
it would be nice to reserve() these vectors up front../src/session_impl.cpp:1482 bandwidth_channel* ch = &p->channel[peer_connection::download_channel];
if (use_quota_overhead(ch, amount_down))
ret |= 1 << peer_connection::download_channel;
ch = &p->channel[peer_connection::upload_channel];
if (use_quota_overhead(ch, amount_up))
ret |= 1 << peer_connection::upload_channel;
}
return ret;
}
// session_impl is responsible for deleting 'pack'
// Apply a settings pack to the running session. The shared_ptr keeps
// `pack` alive across the posted call; the actual work happens in
// apply_settings_pack_impl().
void session_impl::apply_settings_pack(std::shared_ptr<settings_pack> pack)
{
INVARIANT_CHECK;
apply_settings_pack_impl(*pack);
}
// Snapshot every setting currently in effect -- strings, ints and
// bools -- into a settings_pack and return it by value.
settings_pack session_impl::get_settings() const
{
	settings_pack pack;
	// each category occupies its own contiguous internal ID range
	for (int name = settings_pack::string_type_base
		; name < settings_pack::max_string_setting_internal; ++name)
		pack.set_str(name, m_settings.get_str(name));
	for (int name = settings_pack::int_type_base
		; name < settings_pack::max_int_setting_internal; ++name)
		pack.set_int(name, m_settings.get_int(name));
	for (int name = settings_pack::bool_type_base
		; name < settings_pack::max_bool_setting_internal; ++name)
		pack.set_bool(name, m_settings.get_bool(name));
	return pack;
}
namespace {
// Overload set used for tag dispatch: the third (pointer) parameter is
// never dereferenced, it only selects which accessor to call --
// int* -> get_int, bool* -> get_bool, std::string* -> get_str.
template <typename Pack>
int get_setting_impl(Pack const& p, int name, int*)
{ return p.get_int(name); }
template <typename Pack>
bool get_setting_impl(Pack const& p, int name, bool*)
{ return p.get_bool(name); }
template <typename Pack>
std::string get_setting_impl(Pack const& p, int name, std::string*)
{ return p.get_str(name); }
| ||
relevance 0 | ../src/session_impl.cpp:1996 | could this function be merged with expand_unspecified_addresses? right now both listen_endpoint_t and listen_interface_t are almost identical, maybe the latter could be removed too |
could this function be merged with expand_unspecified_addresses?
right now both listen_endpoint_t and listen_interface_t are almost
identical, maybe the latter could be removed too../src/session_impl.cpp:1996 session_log("FATAL SESSION ERROR (%s : %d) [%s]"
, ec.category().name(), ec.value(), ec.message().c_str());
#endif
this->abort();
}
void session_impl::on_ip_change(error_code const& ec)
{
#ifndef TORRENT_DISABLE_LOGGING
if (!ec)
session_log("received ip change from internal ip_notifier");
else
session_log("received error on_ip_change: %d, %s", ec.value(), ec.message().c_str());
#endif
if (ec || m_abort || !m_ip_notifier) return;
m_ip_notifier->async_wait([this] (error_code const& e)
{ wrap(&session_impl::on_ip_change, e); });
reopen_network_sockets({});
}
void interface_to_endpoints(listen_interface_t const& iface
, listen_socket_flags_t flags
, span<ip_interface const> const ifs
, std::vector<listen_endpoint_t>& eps)
{
flags |= iface.local ? listen_socket_t::local_network : listen_socket_flags_t{};
transport const ssl = iface.ssl ? transport::ssl : transport::plaintext;
// First, check to see if it's an IP address
error_code err;
address const adr = make_address(iface.device.c_str(), err);
if (!err)
{
eps.emplace_back(adr, iface.port, std::string{}, ssl, flags);
}
else
{
flags |= listen_socket_t::was_expanded;
// this is the case where device names a network device. We need to
// enumerate all IPs associated with this device
for (auto const& ipface : ifs)
{
// we're looking for a specific interface, and its address
// (which must be of the same family as the address we're
// connecting to)
if (iface.device != ipface.name) continue;
bool const local = iface.local
|| ipface.interface_address.is_loopback()
|| is_link_local(ipface.interface_address);
| ||
relevance 0 | ../src/session_impl.cpp:2304 | it would probably be better to do this by having a listen-socket "version" number that gets bumped. And instead of setting a bool to disable a tracker, we set the version number that it was disabled at. This change would affect the ABI in 1.2, so should be done in 2.0 or later |
it would probably be better to do this by having a
listen-socket "version" number that gets bumped. And instead of
setting a bool to disable a tracker, we set the version number that
it was disabled at. This change would affect the ABI in 1.2, so
should be done in 2.0 or later../src/session_impl.cpp:2304 {
for (auto const& s : m_listen_sockets)
remap_ports(remap_natpmp_and_upnp, *s);
}
else
{
// new sockets need to map ports even if the caller did not request
// re-mapping
for (auto const& s : new_sockets)
remap_ports(remap_natpmp_and_upnp, *s);
}
update_lsd();
#if TORRENT_USE_I2P
open_new_incoming_i2p_connection();
#endif
// trackers that were not reachable, may have become reachable now.
// so clear the "disabled" flags to let them be tried one more time
for (auto& t : m_torrents)
t->enable_all_trackers();
}
// Re-open all listen sockets, optionally re-running NAT-PMP/UPnP port
// mapping when the reopen_map_ports flag is set.
void session_impl::reopen_network_sockets(reopen_network_flags_t const options)
{
reopen_listen_sockets(bool(options & session_handle::reopen_map_ports));
}
namespace {
// (Re-)establish a NAT port mapping for `ep` on mapper `m` (NAT-PMP or
// UPnP). Any previous mapping held in `map_handle` is deleted first;
// on success `map_handle` receives the new mapping, otherwise it is
// left as -1.
template <typename MapProtocol, typename ProtoType, typename EndpointType>
void map_port(MapProtocol& m, ProtoType protocol, EndpointType const& ep
, port_mapping_t& map_handle, std::string const& device)
{
if (map_handle != port_mapping_t{-1}) m.delete_mapping(map_handle);
map_handle = port_mapping_t{-1};
address const addr = ep.address();
// with IPv4 the interface might be behind NAT so we can't skip them
// based on the scope of the local address
if (addr.is_v6() && is_local(addr))
return;
// only update this mapping if we actually have a socket listening
if (ep != EndpointType())
map_handle = m.add_mapping(protocol, ep.port(), ep, device);
}
}
void session_impl::remap_ports(remap_port_mask_t const mask
, listen_socket_t& s)
| ||
relevance 0 | ../src/session_impl.cpp:2864 | this size needs to be capped |
this size needs to be capped../src/session_impl.cpp:2864 // handshaking
return socket_type(ssl_stream<tcp::socket>(tcp::socket(std::move(s)), m_peer_ssl_ctx));
}
else
#endif
{
return socket_type(tcp::socket(std::move(s)));
}
}();
#ifdef TORRENT_SSL_PEERS
TORRENT_ASSERT((ssl == transport::ssl) == is_ssl(c));
#endif
#ifdef TORRENT_SSL_PEERS
if (ssl == transport::ssl)
{
TORRENT_ASSERT(is_ssl(c));
// save the socket so we can cancel the handshake
auto iter = m_incoming_sockets.emplace(std::make_unique<socket_type>(std::move(c))).first;
auto sock = iter->get();
// for SSL connections, incoming_connection() is called
// after the handshake is done
ADD_OUTSTANDING_ASYNC("session_impl::ssl_handshake");
boost::get<ssl_stream<tcp::socket>>(**iter).async_accept_handshake(
[this, sock] (error_code const& err) { ssl_handshake(err, sock); });
}
else
#endif
{
incoming_connection(std::move(c));
}
}
#ifdef TORRENT_SSL_PEERS
void session_impl::on_incoming_utp_ssl(socket_type s)
{
TORRENT_ASSERT(is_ssl(s));
// save the socket so we can cancel the handshake
| ||
relevance 0 | ../src/session_impl.cpp:2889 | this size needs to be capped |
this size needs to be capped../src/session_impl.cpp:2889 // after the handshake is done
ADD_OUTSTANDING_ASYNC("session_impl::ssl_handshake");
boost::get<ssl_stream<tcp::socket>>(**iter).async_accept_handshake(
[this, sock] (error_code const& err) { ssl_handshake(err, sock); });
}
else
#endif
{
incoming_connection(std::move(c));
}
}
#ifdef TORRENT_SSL_PEERS
// Accept handler for incoming SSL-over-uTP connections. Starts the SSL
// handshake asynchronously; incoming_connection() is only invoked once
// the handshake completes (in ssl_handshake()).
void session_impl::on_incoming_utp_ssl(socket_type s)
{
TORRENT_ASSERT(is_ssl(s));
// save the socket so we can cancel the handshake
// (ownership moves into m_incoming_sockets; `sock` is just a key used
// to find it again in ssl_handshake())
auto iter = m_incoming_sockets.emplace(std::make_unique<socket_type>(std::move(s))).first;
auto sock = iter->get();
// for SSL connections, incoming_connection() is called
// after the handshake is done
ADD_OUTSTANDING_ASYNC("session_impl::ssl_handshake");
boost::get<ssl_stream<utp_stream>>(**iter).async_accept_handshake(
[this, sock] (error_code const& err) { ssl_handshake(err, sock); });
}
// to test SSL connections, one can use this openssl command template:
//
// openssl s_client -cert <client-cert>.pem -key <client-private-key>.pem
// -CAfile <torrent-cert>.pem -debug -connect 127.0.0.1:4433 -tls1
// -servername <hex-encoded-info-hash>
void session_impl::ssl_handshake(error_code const& ec, socket_type* sock)
{
COMPLETE_ASYNC("session_impl::ssl_handshake");
auto iter = m_incoming_sockets.find(sock);
// this happens if the SSL connection is aborted because we're shutting
// down
if (iter == m_incoming_sockets.end()) return;
socket_type s(std::move(**iter));
TORRENT_ASSERT(is_ssl(s));
m_incoming_sockets.erase(iter);
error_code e;
| ||
relevance 0 | ../src/session_impl.cpp:3588 | have a separate list for these connections, instead of having to loop through all of them |
have a separate list for these connections, instead of having to loop through all of them../src/session_impl.cpp:3588 // --------------------------------------------------------------
if (!m_paused) m_auto_manage_time_scaler--;
if (m_auto_manage_time_scaler < 0)
{
m_auto_manage_time_scaler = settings().get_int(settings_pack::auto_manage_interval);
recalculate_auto_managed_torrents();
}
// --------------------------------------------------------------
// check for incoming connections that might have timed out
// --------------------------------------------------------------
for (auto i = m_connections.begin(); i != m_connections.end();)
{
peer_connection* p = (*i).get();
++i;
// ignore connections that already have a torrent, since they
// are ticked through the torrents' second_tick
if (!p->associated_torrent().expired()) continue;
int timeout = m_settings.get_int(settings_pack::handshake_timeout);
#if TORRENT_USE_I2P
timeout *= is_i2p(p->get_socket()) ? 4 : 1;
#endif
if (m_last_tick - p->connected_time () > seconds(timeout))
p->disconnect(errors::timed_out, operation_t::bittorrent);
}
// --------------------------------------------------------------
// second_tick every torrent (that wants it)
// --------------------------------------------------------------
#if TORRENT_DEBUG_STREAMING > 0
std::printf("\033[2J\033[0;0H");
#endif
aux::vector<torrent*>& want_tick = m_torrent_lists[torrent_want_tick];
for (int i = 0; i < int(want_tick.size()); ++i)
{
torrent& t = *want_tick[i];
TORRENT_ASSERT(t.want_tick());
TORRENT_ASSERT(!t.is_aborted());
t.second_tick(tick_interval_ms);
// if the call to second_tick caused the torrent
// to no longer want to be ticked (i.e. it was
// removed from the list) we need to back up the counter
// to not miss the torrent after it
if (!t.want_tick()) --i;
}
| ||
relevance 0 | ../src/session_impl.cpp:3621 | this should apply to all bandwidth channels |
this should apply to all bandwidth channels../src/session_impl.cpp:3621#if TORRENT_DEBUG_STREAMING > 0
std::printf("\033[2J\033[0;0H");
#endif
aux::vector<torrent*>& want_tick = m_torrent_lists[torrent_want_tick];
for (int i = 0; i < int(want_tick.size()); ++i)
{
torrent& t = *want_tick[i];
TORRENT_ASSERT(t.want_tick());
TORRENT_ASSERT(!t.is_aborted());
t.second_tick(tick_interval_ms);
// if the call to second_tick caused the torrent
// to no longer want to be ticked (i.e. it was
// removed from the list) we need to back up the counter
// to not miss the torrent after it
if (!t.want_tick()) --i;
}
if (m_settings.get_bool(settings_pack::rate_limit_ip_overhead))
{
int const up_limit = upload_rate_limit(m_global_class);
int const down_limit = download_rate_limit(m_global_class);
if (down_limit > 0
&& m_stat.download_ip_overhead() >= down_limit
&& m_alerts.should_post<performance_alert>())
{
m_alerts.emplace_alert<performance_alert>(torrent_handle()
, performance_alert::download_limit_too_low);
}
if (up_limit > 0
&& m_stat.upload_ip_overhead() >= up_limit
&& m_alerts.should_post<performance_alert>())
{
m_alerts.emplace_alert<performance_alert>(torrent_handle()
, performance_alert::upload_limit_too_low);
}
}
#if TORRENT_ABI_VERSION == 1
m_peak_up_rate = std::max(m_stat.upload_rate(), m_peak_up_rate);
#endif
m_stat.second_tick(tick_interval_ms);
// --------------------------------------------------------------
// scrape paused torrents that are auto managed
// (unless the session is paused)
| ||
relevance 0 | ../src/session_impl.cpp:4312 | use a lower limit than m_settings.connections_limit to allocate the to 10% or so of connection slots for incoming connections cap this at max - 1, since we may add one below |
use a lower limit than m_settings.connections_limit
to allocate 10% or so of the connection slots for incoming
connections
cap this at max - 1, since we may add one below../src/session_impl.cpp:4312 // boost, which are done immediately on a tracker response. These
// connections needs to be deducted from the regular connection attempt
// quota for this tick
if (m_boost_connections > 0)
{
if (m_boost_connections > max_connections)
{
m_boost_connections -= max_connections;
max_connections = 0;
}
else
{
max_connections -= m_boost_connections;
m_boost_connections = 0;
}
}
// zero connections speeds are allowed, we just won't make any connections
if (max_connections <= 0) return;
int const limit = std::min(m_settings.get_int(settings_pack::connections_limit)
- num_connections(), std::numeric_limits<int>::max() - 1);
// this logic is here to smooth out the number of new connection
// attempts over time, to prevent connecting a large number of
// sockets, wait 10 seconds, and then try again
if (m_settings.get_bool(settings_pack::smooth_connects) && max_connections > (limit+1) / 2)
max_connections = (limit + 1) / 2;
aux::vector<torrent*>& want_peers_download = m_torrent_lists[torrent_want_peers_download];
aux::vector<torrent*>& want_peers_finished = m_torrent_lists[torrent_want_peers_finished];
// if no torrent want any peers, just return
if (want_peers_download.empty() && want_peers_finished.empty()) return;
// if we don't have any connection attempt quota, return
if (max_connections <= 0) return;
int steps_since_last_connect = 0;
int const num_torrents = int(want_peers_finished.size() + want_peers_download.size());
for (;;)
{
if (m_next_downloading_connect_torrent >= int(want_peers_download.size()))
m_next_downloading_connect_torrent = 0;
if (m_next_finished_connect_torrent >= int(want_peers_finished.size()))
m_next_finished_connect_torrent = 0;
torrent* t = nullptr;
// there are prioritized torrents. Pick one of those
while (!m_prio_torrents.empty())
| ||
relevance 0 | ../src/session_impl.cpp:4457 | post a message to have this happen immediately instead of waiting for the next tick |
post a message to have this happen
immediately instead of waiting for the next tick../src/session_impl.cpp:4457 continue;
}
if (!p->is_peer_interested()
|| p->is_disconnecting()
|| p->is_connecting())
{
// this peer is not unchokable. So, if it's unchoked
// already, make sure to choke it.
if (p->is_choked())
{
p->reset_choke_counters();
continue;
}
if (pi && pi->optimistically_unchoked)
{
m_stats_counters.inc_stats_counter(counters::num_peers_up_unchoked_optimistic, -1);
pi->optimistically_unchoked = false;
// force a new optimistic unchoke
m_optimistic_unchoke_time_scaler = 0;
}
t->choke_peer(*p);
p->reset_choke_counters();
continue;
}
peers.push_back(p.get());
}
int const allowed_upload_slots = unchoke_sort(peers
, unchoke_interval, m_settings);
if (m_settings.get_int(settings_pack::choking_algorithm) == settings_pack::fixed_slots_choker)
{
int const upload_slots = get_int_setting(settings_pack::unchoke_slots_limit);
m_stats_counters.set_value(counters::num_unchoke_slots, upload_slots);
}
else
{
m_stats_counters.set_value(counters::num_unchoke_slots
, allowed_upload_slots);
}
#ifndef TORRENT_DISABLE_LOGGING
if (should_log())
{
session_log("RECALCULATE UNCHOKE SLOTS: [ peers: %d "
"eligible-peers: %d"
" allowed-slots: %d ]"
, int(m_connections.size())
, int(peers.size())
| ||
relevance 0 | ../src/session_impl.cpp:4784 | it might be a nice feature here to limit the number of torrents to send in a single update. By just posting the first n torrents, they would nicely be round-robined because the torrent lists are always pushed back. Perhaps the status_update_alert could even have a fixed array of n entries rather than a vector, to further improve memory locality. |
it might be a nice feature here to limit the number of torrents
to send in a single update. By just posting the first n torrents, they
would nicely be round-robined because the torrent lists are always
pushed back. Perhaps the status_update_alert could even have a fixed
array of n entries rather than a vector, to further improve memory
locality.../src/session_impl.cpp:4784 t->status(&st, flags);
}
}
void session_impl::post_torrent_updates(status_flags_t const flags)
{
INVARIANT_CHECK;
TORRENT_ASSERT(is_single_thread());
std::vector<torrent*>& state_updates
= m_torrent_lists[aux::session_impl::torrent_state_updates];
#if TORRENT_USE_ASSERTS
m_posting_torrent_updates = true;
#endif
std::vector<torrent_status> status;
status.reserve(state_updates.size());
for (auto& t : state_updates)
{
TORRENT_ASSERT(t->m_links[aux::session_impl::torrent_state_updates].in_list());
status.emplace_back();
// querying accurate download counters may require
// the torrent to be loaded. Loading a torrent, and evicting another
// one will lead to calling state_updated(), which screws with
// this list while we're working on it, and break things
t->status(&status.back(), flags);
t->clear_in_state_update();
}
state_updates.clear();
#if TORRENT_USE_ASSERTS
m_posting_torrent_updates = false;
#endif
m_alerts.emplace_alert<state_update_alert>(std::move(status));
}
void session_impl::post_session_stats()
{
if (!m_posted_stats_header)
{
m_posted_stats_header = true;
m_alerts.emplace_alert<session_stats_header_alert>();
}
m_disk_thread->update_stats_counters(m_stats_counters);
#ifndef TORRENT_DISABLE_DHT
if (m_dht)
| ||
relevance 0 | ../src/session_impl.cpp:5159 | factor out this logic into a separate function for unit testing |
factor out this logic into a separate function for unit
testing../src/session_impl.cpp:5159 if (m_settings.get_int(settings_pack::outgoing_port) > 0)
{
#ifdef TORRENT_WINDOWS
s.set_option(exclusive_address_use(true), ec);
#else
s.set_option(tcp::acceptor::reuse_address(true), ec);
#endif
// ignore errors because the underlying socket may not
// be opened yet. This happens when we're routing through
// a proxy. In that case, we don't yet know the address of
// the proxy server, and more importantly, we don't know
// the address family of its address. This means we can't
// open the socket yet. The socks abstraction layer defers
// opening it.
ec.clear();
bind_ep.port(std::uint16_t(next_port()));
}
if (is_utp(s))
{
utp_socket_impl* impl = nullptr;
transport ssl = transport::plaintext;
#if TORRENT_USE_SSL
if (boost::get<ssl_stream<utp_stream>>(&s) != nullptr)
{
impl = boost::get<ssl_stream<utp_stream>>(s).next_layer().get_impl();
ssl = transport::ssl;
}
else
#endif
impl = boost::get<utp_stream>(s).get_impl();
std::vector<std::shared_ptr<listen_socket_t>> with_gateways;
std::shared_ptr<listen_socket_t> match;
for (auto& ls : m_listen_sockets)
{
// this is almost, but not quite, like can_route()
if (!(ls->flags & listen_socket_t::proxy)
&& is_v4(ls->local_endpoint) != remote_address.is_v4())
continue;
if (ls->ssl != ssl) continue;
if (!(ls->flags & listen_socket_t::local_network))
with_gateways.push_back(ls);
if (ls->flags & listen_socket_t::proxy
|| match_addr_mask(ls->local_endpoint.address(), remote_address, ls->netmask))
{
// is this better than the previous match?
match = ls;
}
| ||
relevance 0 | ../src/session_impl.cpp:5878 | refactor, move the storage to dht_tracker |
refactor, move the storage to dht_tracker../src/session_impl.cpp:5878#ifndef TORRENT_DISABLE_LOGGING
session_log("not starting DHT, outstanding router lookups: %d"
, m_outstanding_router_lookups);
#endif
return;
}
if (m_abort)
{
#ifndef TORRENT_DISABLE_LOGGING
session_log("not starting DHT, aborting");
#endif
return;
}
#ifndef TORRENT_DISABLE_LOGGING
session_log("starting DHT, running: %s, router lookups: %d"
, m_dht ? "true" : "false", m_outstanding_router_lookups);
#endif
m_dht_storage = m_dht_storage_constructor(m_settings);
m_dht = std::make_shared<dht::dht_tracker>(
static_cast<dht::dht_observer*>(this)
, m_io_context
, [this](aux::listen_socket_handle const& sock
, udp::endpoint const& ep
, span<char const> p
, error_code& ec
, udp_send_flags_t const flags)
{ send_udp_packet_listen(sock, ep, p, ec, flags); }
, m_settings
, m_stats_counters
, *m_dht_storage
, std::move(m_dht_state));
for (auto& s : m_listen_sockets)
{
if (s->ssl != transport::ssl
&& !(s->flags & listen_socket_t::local_network))
{
m_dht->new_socket(s);
}
}
for (auto const& n : m_dht_router_nodes)
{
m_dht->add_router_node(n);
}
for (auto const& n : m_dht_nodes)
{
| ||
relevance 0 | ../src/session_impl.cpp:6265 | asserts that no outstanding async operations are still in flight |
asserts that no outstanding async operations are still in flight../src/session_impl.cpp:6265 if (!m_dht) return;
m_dht->direct_request(ep, e, std::bind(&on_direct_response, std::ref(m_alerts), userdata, _1));
}
#endif
bool session_impl::is_listening() const
{
return !m_listen_sockets.empty();
}
session_impl::~session_impl()
{
// since we're destructing the session, no more alerts will make it out to
// the user. So stop posting them now
m_alerts.set_alert_mask({});
m_alerts.set_notify_function({});
// this is not allowed to be the network thread!
// TORRENT_ASSERT(is_not_thread());
// this can happen if we end the io_context run loop with an exception
m_connections.clear();
for (auto& t : m_torrents)
{
t->panic();
t->abort();
}
m_torrents.clear();
// this has probably been called already, but in case of sudden
// termination through an exception, it may not have been done
abort_stage2();
#if defined TORRENT_ASIO_DEBUGGING
FILE* f = fopen("wakeups.log", "w+");
if (f != nullptr)
{
time_point m = min_time();
if (!_wakeups.empty()) m = _wakeups[0].timestamp;
time_point prev = m;
std::uint64_t prev_csw = 0;
if (!_wakeups.empty()) prev_csw = _wakeups[0].context_switches;
std::fprintf(f, "abs. time\trel. time\tctx switch\tidle-wakeup\toperation\n");
for (wakeup_t const& w : _wakeups)
{
bool const idle_wakeup = w.context_switches > prev_csw;
std::fprintf(f, "%" PRId64 "\t%" PRId64 "\t%" PRId64 "\t%c\t%s\n"
, total_microseconds(w.timestamp - m)
, total_microseconds(w.timestamp - prev)
, w.context_switches
| ||
relevance 0 | ../src/load_torrent.cpp:121 | move the loading logic from torrent_info constructor into here |
move the loading logic from torrent_info constructor into here../src/load_torrent.cpp:121 mask.resize(std::size_t(full_size), false);
for (int i = merkle_first_leaf(piece_layer_size)
, end = i + num_pieces; i < end; ++i)
{
mask[std::size_t(i)] = true;
}
}
ti->free_piece_layers();
}
atp.info_hashes = atp.ti->info_hashes();
}
}
add_torrent_params load_torrent_file(std::string const& filename)
{ return load_torrent_file(filename, load_torrent_limits{}); }
add_torrent_params load_torrent_buffer(span<char const> buffer)
{ return load_torrent_buffer(buffer, load_torrent_limits{}); }
add_torrent_params load_torrent_parsed(bdecode_node const& torrent_file)
{ return load_torrent_parsed(torrent_file, load_torrent_limits{}); }
add_torrent_params load_torrent_file(std::string const& filename, load_torrent_limits const& cfg)
{
add_torrent_params ret;
ret.ti = std::make_shared<torrent_info>(filename, cfg);
update_atp(ret);
return ret;
}
add_torrent_params load_torrent_buffer(span<char const> buffer, load_torrent_limits const& cfg)
{
add_torrent_params ret;
ret.ti = std::make_shared<torrent_info>(buffer, cfg, from_span);
update_atp(ret);
return ret;
}
add_torrent_params load_torrent_parsed(bdecode_node const& torrent_file, load_torrent_limits const& cfg)
{
add_torrent_params ret;
ret.ti = std::make_shared<torrent_info>(torrent_file, cfg);
update_atp(ret);
return ret;
}
}
| ||
relevance 0 | ../src/kademlia/node.cpp:1177 | keep the returned value to pass as a limit to write_nodes_entries when implemented |
keep the returned value to pass as a limit
to write_nodes_entries when implemented../src/kademlia/node.cpp:1177 }
}
else if (query == "sample_infohashes")
{
static key_desc_t const msg_desc[] = {
{"target", bdecode_node::string_t, 20, 0},
{"want", bdecode_node::list_t, 0, key_desc_t::optional},
};
bdecode_node msg_keys[2];
if (!verify_message(arg_ent, msg_desc, msg_keys, error_string))
{
m_counters.inc_stats_counter(counters::dht_invalid_sample_infohashes);
incoming_error(e, error_string);
return;
}
m_counters.inc_stats_counter(counters::dht_sample_infohashes_in);
sha1_hash const target(msg_keys[0].string_ptr());
m_storage.get_infohashes_sample(reply);
write_nodes_entries(target, msg_keys[1], reply);
}
else
{
// if we don't recognize the message but there's a
// 'target' or 'info_hash' in the arguments, treat it
// as find_node to be future compatible
bdecode_node target_ent = arg_ent.dict_find_string("target");
if (!target_ent || target_ent.string_length() != 20)
{
target_ent = arg_ent.dict_find_string("info_hash");
if (!target_ent || target_ent.string_length() != 20)
{
incoming_error(e, "unknown message");
return;
}
}
sha1_hash const target(target_ent.string_ptr());
// always return nodes as well as peers
write_nodes_entries(target, arg_ent.dict_find_list("want"), reply);
}
}
| ||
relevance 0 | ../src/kademlia/node.cpp:1205 | limit number of entries in the result |
limit number of entries in the result../src/kademlia/node.cpp:1205 // if we don't recognize the message but there's a
// 'target' or 'info_hash' in the arguments, treat it
// as find_node to be future compatible
bdecode_node target_ent = arg_ent.dict_find_string("target");
if (!target_ent || target_ent.string_length() != 20)
{
target_ent = arg_ent.dict_find_string("info_hash");
if (!target_ent || target_ent.string_length() != 20)
{
incoming_error(e, "unknown message");
return;
}
}
sha1_hash const target(target_ent.string_ptr());
// always return nodes as well as peers
write_nodes_entries(target, arg_ent.dict_find_list("want"), reply);
}
}
void node::write_nodes_entries(sha1_hash const& info_hash
, bdecode_node const& want, entry& r)
{
// if no wants entry was specified, include a nodes
// entry based on the protocol the request came in with
if (want.type() != bdecode_node::list_t)
{
std::vector<node_entry> const n = m_table.find_node(info_hash, {});
r[protocol_nodes_key()] = write_nodes_entry(n);
return;
}
// if there is a wants entry then we may need to reach into
// another node's routing table to get nodes of the requested type
// we use a map maintained by the owning dht_tracker to find the
// node associated with each string in the want list, which may
// include this node
for (int i = 0; i < want.list_size(); ++i)
{
bdecode_node wanted = want.list_at(i);
if (wanted.type() != bdecode_node::string_t)
continue;
node* wanted_node = m_get_foreign_node(info_hash, wanted.string_value().to_string());
if (!wanted_node) continue;
std::vector<node_entry> const n = wanted_node->m_table.find_node(info_hash, {});
r[wanted_node->protocol_nodes_key()] = write_nodes_entry(n);
}
}
node::protocol_descriptor const& node::map_protocol_to_descriptor(udp const protocol)
{
| ||
relevance 0 | ../src/kademlia/item.cpp:143 | implement ctor for entry from bdecode_node? |
implement ctor for entry from bdecode_node?../src/kademlia/item.cpp:143 , secret_key const& sk)
{
char str[1200];
int const len = canonical_string(v, seq, salt, str);
return ed25519_sign({str, len}, pk, sk);
}
item::item(public_key const& pk, span<char const> salt)
: m_salt(salt.data(), static_cast<std::size_t>(salt.size()))
, m_pk(pk)
, m_mutable(true)
{}
item::item(entry v)
: m_value(std::move(v))
{}
item::item(bdecode_node const& v)
{
m_value = v;
}
item::item(entry v, span<char const> salt
, sequence_number const seq, public_key const& pk, secret_key const& sk)
{
assign(std::move(v), salt, seq, pk, sk);
}
void item::assign(entry v)
{
m_mutable = false;
m_value = std::move(v);
}
void item::assign(entry v, span<char const> salt
, sequence_number const seq, public_key const& pk, secret_key const& sk)
{
std::array<char, 1000> buffer;
int const bsize = bencode(buffer.begin(), v);
TORRENT_ASSERT(bsize <= 1000);
m_sig = sign_mutable_item(span<char const>(buffer).first(bsize)
, salt, seq, pk, sk);
m_salt.assign(salt.data(), static_cast<std::size_t>(salt.size()));
m_pk = pk;
m_seq = seq;
m_mutable = true;
m_value = std::move(v);
}
void item::assign(bdecode_node const& v)
| ||
relevance 0 | ../src/kademlia/dht_tracker.cpp:317 | pick the closest node rather than the first |
pick the closest node rather than the first../src/kademlia/dht_tracker.cpp:317
#ifndef TORRENT_DISABLE_LOGGING
m_log->log(dht_logger::tracker, "*** new write key*** %d nodes"
, int(m_nodes.size()));
#endif
}
void dht_tracker::update_storage_node_ids()
{
std::vector<sha1_hash> ids;
for (auto& n : m_nodes)
ids.push_back(n.second.dht.nid());
m_storage.update_node_ids(ids);
}
node* dht_tracker::get_node(node_id const& id, std::string const& family_name)
{
TORRENT_UNUSED(id);
for (auto& n : m_nodes)
{
if (n.second.dht.protocol_family_name() == family_name)
return &n.second.dht;
}
return nullptr;
}
void dht_tracker::get_peers(sha1_hash const& ih
, std::function<void(std::vector<tcp::endpoint> const&)> f)
{
for (auto& n : m_nodes)
n.second.dht.get_peers(ih, f, {}, {});
}
void dht_tracker::announce(sha1_hash const& ih, int listen_port
, announce_flags_t const flags
, std::function<void(std::vector<tcp::endpoint> const&)> f)
{
for (auto& n : m_nodes)
n.second.dht.announce(ih, listen_port, flags, f);
}
void dht_tracker::sample_infohashes(udp::endpoint const& ep, sha1_hash const& target
, std::function<void(node_id
, time_duration
, int, std::vector<sha1_hash>
, std::vector<std::pair<sha1_hash, udp::endpoint>>)> f)
{
for (auto& n : m_nodes)
{
if (ep.protocol() != (n.first.get_external_address().is_v4() ? udp::v4() : udp::v6()))
| ||
relevance 0 | ../src/kademlia/put_data.cpp:92 | what if o is not an instance of put_data_observer? This need to be redesigned for better type safety. |
what if o is not an instance of put_data_observer? This needs to be
redesigned for better type safety.../src/kademlia/put_data.cpp:92 }
}
void put_data::done()
{
m_done = true;
#ifndef TORRENT_DISABLE_LOGGING
get_node().observer()->log(dht_logger::traversal, "[%u] %s DONE, response %d, timeout %d"
, id(), name(), num_responses(), num_timeouts());
#endif
m_put_callback(m_data, num_responses());
traversal_algorithm::done();
}
bool put_data::invoke(observer_ptr o)
{
if (m_done) return false;
auto* po = static_cast<put_data_observer*>(o.get());
entry e;
e["y"] = "q";
e["q"] = "put";
entry& a = e["a"];
a["v"] = m_data.value();
a["token"] = po->m_token;
if (m_data.is_mutable())
{
a["k"] = m_data.pk().bytes;
a["seq"] = m_data.seq().value;
a["sig"] = m_data.sig().bytes;
if (!m_data.salt().empty())
{
a["salt"] = m_data.salt();
}
}
m_node.stats_counters().inc_stats_counter(counters::dht_put_out);
return m_node.m_rpc.invoke(e, o->target_ep(), o);
}
} } // namespace libtorrent::dht
| ||
relevance 0 | ../src/kademlia/routing_table.cpp:289 | This is temporary. For now, only report the largest routing table (of potentially multiple ones, for multi-homed systems) in next major version, break the ABI and support reporting all of them in the dht_stats_alert |
This is temporary. For now, only report the largest routing table
(of potentially multiple ones, for multi-homed systems)
in next major version, break the ABI and support reporting all of them in
the dht_stats_alert../src/kademlia/routing_table.cpp:289 , m_bucket_size(bucket_size)
{
// bucket sizes must be a power of 2
TORRENT_ASSERT_VAL(((bucket_size - 1) & bucket_size) == 0, bucket_size);
TORRENT_UNUSED(log);
m_buckets.reserve(30);
}
int routing_table::bucket_limit(int bucket) const
{
if (!m_settings.get_bool(settings_pack::dht_extended_routing_table)) return m_bucket_size;
static const aux::array<int, 4> size_exceptions{{{16, 8, 4, 2}}};
if (bucket < size_exceptions.end_index())
return m_bucket_size * size_exceptions[bucket];
return m_bucket_size;
}
void routing_table::status(std::vector<dht_routing_bucket>& s) const
{
if (s.size() > m_buckets.size()) return;
s.clear();
for (auto const& i : m_buckets)
{
dht_routing_bucket b;
b.num_nodes = int(i.live_nodes.size());
b.num_replacements = int(i.replacements.size());
s.push_back(b);
}
}
#if TORRENT_ABI_VERSION == 1
| ||
relevance 0 | ../src/kademlia/routing_table.cpp:314 | arvidn note when it's across IPv4 and IPv6, adding (dht_global_nodes) would make sense. in the future though, where we may have one DHT node per external interface (which may be multiple of the same address family), then it becomes a bit trickier |
arvidn note
when it's across IPv4 and IPv6, adding (dht_global_nodes) would
make sense. in the future though, where we may have one DHT node
per external interface (which may be multiple of the same address
family), then it becomes a bit trickier../src/kademlia/routing_table.cpp:314 if (s.size() > m_buckets.size()) return;
s.clear();
for (auto const& i : m_buckets)
{
dht_routing_bucket b;
b.num_nodes = int(i.live_nodes.size());
b.num_replacements = int(i.replacements.size());
s.push_back(b);
}
}
#if TORRENT_ABI_VERSION == 1
void routing_table::status(session_status& s) const
{
int dht_nodes;
int dht_node_cache;
int ignore;
std::tie(dht_nodes, dht_node_cache, ignore) = size();
s.dht_nodes += dht_nodes;
s.dht_node_cache += dht_node_cache;
s.dht_global_nodes += num_global_nodes();
for (auto const& i : m_buckets)
{
dht_routing_bucket b;
b.num_nodes = int(i.live_nodes.size());
b.num_replacements = int(i.replacements.size());
#if TORRENT_ABI_VERSION == 1
b.last_active = 0;
#endif
s.dht_routing_table.push_back(b);
}
}
#endif
std::tuple<int, int, int> routing_table::size() const
{
int nodes = 0;
int replacements = 0;
int confirmed = 0;
for (auto const& i : m_buckets)
{
nodes += int(i.live_nodes.size());
confirmed += static_cast<int>(std::count_if(i.live_nodes.begin(), i.live_nodes.end()
, [](node_entry const& k) { return k.confirmed(); } ));
replacements += int(i.replacements.size());
}
return std::make_tuple(nodes, replacements, confirmed);
}
| ||
relevance 0 | ../src/kademlia/routing_table.cpp:518 | this need to take bucket "prefix" into account. It should be unified with add_node_impl() |
this needs to take bucket "prefix" into account. It should be unified
with add_node_impl()../src/kademlia/routing_table.cpp:518{
for (auto i = m_buckets.begin() , end(m_buckets.end()); i != end; ++i)
{
for (auto j = i->replacements.begin(); j != i->replacements.end(); ++j)
{
if (j->addr() != ep.address()) continue;
if (j->port() != ep.port()) continue;
return std::make_tuple(&*j, i, &i->replacements);
}
for (auto j = i->live_nodes.begin(); j != i->live_nodes.end(); ++j)
{
if (j->addr() != ep.address()) continue;
if (j->port() != ep.port()) continue;
return std::make_tuple(&*j, i, &i->live_nodes);
}
}
return std::tuple<node_entry*, routing_table::table_t::iterator, bucket_t*>
{nullptr, m_buckets.end(), nullptr};
}
void routing_table::fill_from_replacements(table_t::iterator bucket)
{
bucket_t& b = bucket->live_nodes;
bucket_t& rb = bucket->replacements;
int const bucket_size = bucket_limit(int(std::distance(m_buckets.begin(), bucket)));
if (int(b.size()) >= bucket_size) return;
// sort by RTT first, to find the node with the lowest
// RTT that is pinged
std::sort(rb.begin(), rb.end());
while (int(b.size()) < bucket_size && !rb.empty())
{
auto j = std::find_if(rb.begin(), rb.end(), std::bind(&node_entry::pinged, _1));
if (j == rb.end()) break;
b.push_back(*j);
rb.erase(j);
}
}
void routing_table::prune_empty_bucket()
{
if (m_buckets.back().live_nodes.empty()
&& m_buckets.back().replacements.empty())
{
m_buckets.erase(m_buckets.end() - 1);
}
}
void routing_table::remove_node(node_entry* n, bucket_t* b)
| ||
relevance 0 | ../src/kademlia/node_id.cpp:66 | it's a little bit weird to return 159 - leading zeroes. It should probably be 160 - leading zeroes, but all other code in here is tuned to this expectation now, and it doesn't really matter (other than complexity) |
it's a little bit weird to return 159 - leading zeroes. It should
probably be 160 - leading zeroes, but all other code in here is tuned to
this expectation now, and it doesn't really matter (other than complexity)../src/kademlia/node_id.cpp:66
// returns the distance between the two nodes
// using the kademlia XOR-metric
node_id distance(node_id const& n1, node_id const& n2)
{
return n1 ^ n2;
}
// returns true if: distance(n1, ref) < distance(n2, ref)
bool compare_ref(node_id const& n1, node_id const& n2, node_id const& ref)
{
node_id const lhs = n1 ^ ref;
node_id const rhs = n2 ^ ref;
return lhs < rhs;
}
// returns n in: 2^n <= distance(n1, n2) < 2^(n+1)
// useful for finding out which bucket a node belongs to
int distance_exp(node_id const& n1, node_id const& n2)
{
return std::max(159 - distance(n1, n2).count_leading_zeroes(), 0);
}
int min_distance_exp(node_id const& n1, std::vector<node_id> const& ids)
{
TORRENT_ASSERT(ids.size() > 0);
int min = 160; // see distance_exp for the why of this constant
for (auto const& node_id : ids)
{
min = std::min(min, distance_exp(n1, node_id));
}
return min;
}
node_id generate_id_impl(address const& ip_, std::uint32_t r)
{
std::uint8_t* ip = nullptr;
static std::uint8_t const v4mask[] = { 0x03, 0x0f, 0x3f, 0xff };
static std::uint8_t const v6mask[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
std::uint8_t const* mask = nullptr;
int num_octets = 0;
address_v4::bytes_type b4{};
address_v6::bytes_type b6{};
if (ip_.is_v6())
{
b6 = ip_.to_v6().to_bytes();
ip = b6.data();
| ||
relevance 0 | ../include/libtorrent/piece_picker.hpp:802 | should this be allocated lazily? |
should this be allocated lazily?../include/libtorrent/piece_picker.hpp:802 // download list it may live in now
std::vector<downloading_piece>::iterator update_piece_state(
std::vector<downloading_piece>::iterator dp);
private:
#if TORRENT_USE_ASSERTS || TORRENT_USE_INVARIANT_CHECKS
index_range<download_queue_t> categories() const
{ return {{}, piece_picker::piece_pos::num_download_categories}; }
#endif
// the following vectors are mutable because they sometimes may
// be updated lazily, triggered by const functions
// this maps indices to number of peers that has this piece and
// index into the m_piece_info vectors.
// piece_pos::we_have_index means that we have the piece, so it
// doesn't exist in the piece_info buckets
// pieces with the filtered flag set doesn't have entries in
// the m_piece_info buckets either
mutable aux::vector<piece_pos, piece_index_t> m_piece_map;
// tracks the number of bytes in a specific piece that are part of a pad
// file. The padding is assumed to be at the end of the piece, and the
// blocks covered by the pad bytes are not picked by the piece picker
std::unordered_map<piece_index_t, int> m_pads_in_piece;
// when the adjacent_piece affinity is enabled, this contains the most
// recent "extents" of adjacent pieces that have been requested from
// this is mutable because it's updated by functions to pick pieces, which
// are const. That's an efficient place to update it, since it's being
// traversed already.
mutable std::vector<piece_extent_t> m_recent_extents;
// the number of bytes of pad file set in this piece picker
int m_num_pad_bytes = 0;
// the number of pad blocks that we already have
int m_have_pad_bytes = 0;
// the number of pad blocks part of filtered pieces we don't have
int m_filtered_pad_bytes = 0;
// the number of pad blocks we have that are also filtered
int m_have_filtered_pad_bytes = 0;
// the number of seeds. These are not added to
// the availability counters of the pieces
int m_seeds = 0;
// the number of pieces that have passed the hash check
| ||
relevance 0 | ../include/libtorrent/piece_picker.hpp:877 | it would be more intuitive to account "wanted" pieces instead of filtered |
it would be more intuitive to account "wanted" pieces
instead of filtered../include/libtorrent/piece_picker.hpp:877
// this holds the information of the blocks in partially downloaded
// pieces. the downloading_piece::info index point into this vector for
// its storage
aux::vector<block_info> m_block_info;
// these are block ranges in m_block_info that are free. The numbers
// in here, when multiplied by blocks_per_piece is the index to the
// first block in the range that's free to use by a new downloading_piece.
// this is a free-list.
std::vector<std::uint16_t> m_free_block_infos;
std::uint16_t m_blocks_in_last_piece = 0;
int m_piece_size = 0;
std::int64_t m_total_size = 0;
// the number of filtered pieces that we don't already
// have. total_number_of_pieces - number_of_pieces_we_have
// - num_filtered is supposed to the number of pieces
// we still want to download
int m_num_filtered = 0;
// the number of pieces we have that also are filtered
int m_num_have_filtered = 0;
// we have all pieces in the range [0, m_cursor)
// m_cursor is the first piece we don't have
piece_index_t m_cursor{0};
// we have all pieces in the range [m_reverse_cursor, end)
// m_reverse_cursor is the first piece where we also have
// all the subsequent pieces
piece_index_t m_reverse_cursor{0};
// the number of pieces we have (i.e. passed + flushed).
// This includes pieces that we have filtered but still have
int m_num_have = 0;
// if this is set to true, it means update_pieces()
// has to be called before accessing m_pieces.
mutable bool m_dirty = false;
public:
enum { max_pieces = (std::numeric_limits<int>::max)() - 1 };
};
}
#endif // TORRENT_PIECE_PICKER_HPP_INCLUDED
| ||
relevance 0 | ../include/libtorrent/torrent.hpp:280 | make this a raw pointer. perhaps keep the shared_ptr around further down the object to maintain an owner |
make this a raw pointer. perhaps keep the shared_ptr
around further down the object to maintain an owner../include/libtorrent/torrent.hpp:280#endif
};
struct TORRENT_EXTRA_EXPORT torrent_hot_members
{
torrent_hot_members(aux::session_interface& ses
, add_torrent_params const& p, bool session_paused);
protected:
// the piece picker. This is allocated lazily. When we don't
// have anything in the torrent (for instance, if it hasn't
// been started yet) or if we have everything, there is no
// picker. It's allocated on-demand the first time we need
// it in torrent::need_picker(). In order to tell the
// difference between having everything and nothing in
// the case there is no piece picker, see m_have_all.
std::unique_ptr<piece_picker> m_picker;
std::unique_ptr<hash_picker> m_hash_picker;
std::shared_ptr<torrent_info> m_torrent_file;
// This is the sum of all non-pad file sizes. In the next major version
// this is stored in file_storage and no longer need to be kept here.
std::int64_t m_size_on_disk = 0;
// a back reference to the session
// this torrent belongs to.
aux::session_interface& m_ses;
// this vector is sorted at all times, by the pointer value.
// use sorted_insert() and sorted_find() on it. The GNU STL
// implementation on Darwin uses significantly less memory to
// represent a vector than a set, and this set is typically
// relatively small, and it's cheap to copy pointers.
aux::vector<peer_connection*> m_connections;
// the scrape data from the tracker response, this
// is optional and may be 0xffffff
std::uint32_t m_complete:24;
// set to true when this torrent may not download anything
bool m_upload_mode:1;
// this is set to false as long as the connections
// of this torrent haven't been initialized. If we
// have metadata from the start, connections are
// initialized immediately, if we didn't have metadata,
// they are initialized right after files_checked().
// valid_resume_data() will return false as long as
// the connections aren't initialized, to avoid
| ||
relevance 0 | ../include/libtorrent/torrent.hpp:463 | make graceful pause also finish all sending blocks before disconnecting |
make graceful pause also finish all sending blocks
before disconnecting../include/libtorrent/torrent.hpp:463
void on_resume_data_checked(status_t status, storage_error const& error);
void on_force_recheck(status_t status, storage_error const& error);
void on_piece_hashed(aux::vector<sha256_hash> block_hashes
, piece_index_t piece, sha1_hash const& piece_hash
, storage_error const& error);
void files_checked();
void start_checking();
void start_announcing();
void stop_announcing();
void send_upload_only();
#ifndef TORRENT_DISABLE_SHARE_MODE
void send_share_mode();
void set_share_mode(bool s);
bool share_mode() const { return m_share_mode; }
#endif
bool graceful_pause() const { return m_graceful_pause_mode; }
torrent_flags_t flags() const;
void set_flags(torrent_flags_t flags, torrent_flags_t mask);
void set_upload_mode(bool b);
bool upload_mode() const { return m_upload_mode || m_graceful_pause_mode; }
bool is_upload_only() const { return is_finished() || upload_mode(); }
int seed_rank(aux::session_settings const& s) const;
void add_piece(piece_index_t piece, char const* data, add_piece_flags_t flags);
void add_piece_async(piece_index_t piece, std::vector<char> data, add_piece_flags_t flags);
void on_disk_write_complete(storage_error const& error
, peer_request const& p);
void set_progress_ppm(int p) { m_progress_ppm = std::uint32_t(p); }
struct read_piece_struct
{
boost::shared_array<char> piece_data;
int blocks_left;
bool fail;
error_code error;
};
void read_piece(piece_index_t);
void on_disk_read_complete(disk_buffer_holder, storage_error const&
, peer_request const&, std::shared_ptr<read_piece_struct>);
storage_mode_t storage_mode() const;
// this will flag the torrent as aborted. The main
| ||
relevance 0 | ../include/libtorrent/torrent.hpp:600 | make this flag a combination of the other ones |
make this flag a combination
of the other ones../include/libtorrent/torrent.hpp:600 void do_pause(bool was_paused = false);
void do_resume();
seconds32 finished_time() const;
seconds32 active_time() const;
seconds32 seeding_time() const;
seconds32 upload_mode_time() const;
bool is_paused() const;
bool is_torrent_paused() const { return m_paused; }
void force_recheck();
void save_resume_data(resume_data_flags_t flags);
bool need_save_resume_data(resume_data_flags_t flags) const
{
return bool(m_need_save_resume_data & flags);
}
void set_need_save_resume(resume_data_flags_t const flag)
{
m_need_save_resume_data |= torrent_handle::only_if_modified;
if (m_need_save_resume_data & flag) return;
m_need_save_resume_data |= flag;
state_updated();
}
bool is_auto_managed() const { return m_auto_managed; }
void auto_managed(bool a);
bool should_check_files() const;
bool delete_files(remove_flags_t options);
void peers_erased(std::vector<torrent_peer*> const& peers);
#if TORRENT_ABI_VERSION == 1
#if !TORRENT_NO_FPU
void file_progress_float(aux::vector<float, file_index_t>& fp);
#endif
#endif // TORRENT_ABI_VERSION
void post_piece_availability();
void piece_availability(aux::vector<int, piece_index_t>& avail) const;
void set_piece_priority(piece_index_t index, download_priority_t priority);
download_priority_t piece_priority(piece_index_t index) const;
void prioritize_pieces(aux::vector<download_priority_t, piece_index_t> const& pieces);
void prioritize_piece_list(std::vector<std::pair<piece_index_t, download_priority_t>> const& pieces);
void piece_priorities(aux::vector<download_priority_t, piece_index_t>*) const;
| ||
relevance 0 | ../include/libtorrent/torrent.hpp:1396 | this wastes 5 bits per file |
this wastes 5 bits per file../include/libtorrent/torrent.hpp:1396#ifndef TORRENT_DISABLE_EXTENSIONS
std::list<std::shared_ptr<torrent_plugin>> m_extensions;
#endif
// used for tracker announces
deadline_timer m_tracker_timer;
// used to detect when we are active or inactive for long enough
// to trigger the auto-manage logic
deadline_timer m_inactivity_timer;
// this is the upload and download statistics for the whole torrent.
// it's updated from all its peers once every second.
libtorrent::stat m_stat;
// -----------------------------
// this vector is allocated lazily. If no file priorities are
// ever changed, this remains empty. Any unallocated slot
// implicitly means the file has priority 4.
aux::vector<download_priority_t, file_index_t> m_file_priority;
// any file priority updates attempted while another file priority update
// is in-progress/outstanding with the disk I/O thread, are queued up in
// this dictionary. Once the outstanding update comes back, all of these
// are applied in one batch
std::map<file_index_t, download_priority_t> m_deferred_file_priorities;
// this object is used to track download progress of individual files
aux::file_progress m_file_progress;
// a queue of the most recent low-availability pieces we accessed on disk.
// These are good candidates for suggesting other peers to request from
// us.
aux::suggest_piece m_suggest_pieces;
aux::vector<aux::announce_entry> m_trackers;
#ifndef TORRENT_DISABLE_STREAMING
// this list is sorted by time_critical_piece::deadline
std::vector<time_critical_piece> m_time_critical_pieces;
#endif
std::string m_trackerid;
#if TORRENT_ABI_VERSION == 1
// deprecated in 1.1
std::string m_username;
std::string m_password;
#endif
std::string m_save_path;
| ||
relevance 0 | ../include/libtorrent/torrent.hpp:1711 | this member can probably be removed |
this member can probably be removed../include/libtorrent/torrent.hpp:1711 unsigned int m_num_uploads:24;
// 4 unused bits
// when this is true, this torrent supports peer exchange
bool m_enable_pex:1;
// set to true if the session IP filter applies to this
// torrent or not. Defaults to true.
bool m_apply_ip_filter:1;
// this is true when our effective inactive state is different from our
// actual inactive state. Whenever this state changes, there is a
// quarantine period until we change the effective state. This is to avoid
// flapping. If the state changes back during this period, we cancel the
// quarantine
bool m_pending_active_change:1;
// this is set to true if all piece layers were successfully loaded and
// validated. Only for v2 torrents
bool m_v2_piece_layers_validated:1;
// ----
// this is set to the connect boost quota for this torrent.
// After having received this many priority peer connection attempts, it
// falls back onto the steady state peer connection logic, driven by the
// session tick. Each tracker response, as long as this is non-zero, will
// attempt to connect to peers immediately and decrement the counter.
// We give torrents a connect boost when they are first added and then
// every time they resume from being paused.
std::uint8_t m_connect_boost_counter;
// ----
// the scrape data from the tracker response, this
// is optional and may be 0xffffff
std::uint32_t m_incomplete:24;
// true when the torrent should announce to
// the DHT
bool m_announce_to_dht:1;
// even if we're not built to support SSL torrents,
// remember that this is an SSL torrent, so that we don't
// accidentally start seeding it without any authentication.
bool m_ssl_torrent:1;
// this is set to true if we're trying to delete the
// files belonging to it. When set, don't write any
// more blocks to disk!
| ||
relevance 0 | ../include/libtorrent/peer_connection_interface.hpp:51 | make this interface smaller! |
make this interface smaller!../include/libtorrent/peer_connection_interface.hpp:51
*/
#ifndef TORRENT_PEER_CONNECTION_INTERFACE_HPP
#define TORRENT_PEER_CONNECTION_INTERFACE_HPP
#include "libtorrent/fwd.hpp"
#include "libtorrent/socket.hpp"
#include "libtorrent/error_code.hpp"
#include "libtorrent/alert_types.hpp"
#include "libtorrent/operations.hpp" // for operation_t enum
#include "libtorrent/units.hpp"
namespace libtorrent {
struct torrent_peer;
class stat;
using disconnect_severity_t = aux::strong_typedef<std::uint8_t, struct disconnect_severity_tag>;
struct TORRENT_EXTRA_EXPORT peer_connection_interface
{
static constexpr disconnect_severity_t normal{0};
static constexpr disconnect_severity_t failure{1};
static constexpr disconnect_severity_t peer_error{2};
#if TORRENT_USE_I2P
virtual std::string const& destination() const = 0;
virtual std::string const& local_i2p_endpoint() const = 0;
#endif
virtual tcp::endpoint const& remote() const = 0;
virtual tcp::endpoint local_endpoint() const = 0;
virtual void disconnect(error_code const& ec
, operation_t op, disconnect_severity_t = peer_connection_interface::normal) = 0;
virtual peer_id const& pid() const = 0;
virtual peer_id our_pid() const = 0;
virtual void set_holepunch_mode() = 0;
virtual torrent_peer* peer_info_struct() const = 0;
virtual void set_peer_info(torrent_peer* pi) = 0;
virtual bool is_outgoing() const = 0;
virtual void add_stat(std::int64_t downloaded, std::int64_t uploaded) = 0;
virtual bool fast_reconnect() const = 0;
virtual bool is_choked() const = 0;
virtual bool failed() const = 0;
virtual stat const& statistics() const = 0;
virtual void get_peer_info(peer_info& p) const = 0;
#ifndef TORRENT_DISABLE_LOGGING
virtual bool should_log(peer_log_alert::direction_t direction) const = 0;
virtual void peer_log(peer_log_alert::direction_t direction
, char const* event, char const* fmt = "", ...) const noexcept TORRENT_FORMAT(4,5) = 0;
#endif
| ||
relevance 0 | ../include/libtorrent/announce_entry.hpp:76 | include the number of peers received from this tracker, at last announce |
include the number of peers received from this tracker, at last
announce../include/libtorrent/announce_entry.hpp:76
struct TORRENT_EXPORT announce_infohash
{
// internal
TORRENT_UNEXPORT announce_infohash();
// if this tracker has returned an error or warning message
// that message is stored here
std::string message;
// if this tracker failed the last time it was contacted
// this error code specifies what error occurred
error_code last_error;
// the time of next tracker announce
time_point32 next_announce = (time_point32::min)();
// no announces before this time
time_point32 min_announce = (time_point32::min)();
// these are either -1 or the scrape information this tracker last
// responded with. *incomplete* is the current number of downloaders in
// the swarm, *complete* is the current number of seeds in the swarm and
// *downloaded* is the cumulative number of completed downloads of this
// torrent, since the beginning of time (from this tracker's point of
// view).
// if this tracker has returned scrape data, these fields are filled in
// with valid numbers. Otherwise they are set to -1. ``incomplete`` counts
// the number of current downloaders. ``complete`` counts the number of
// current peers that have completed the download, or "seeds". ``downloaded`` is the
// cumulative number of completed downloads.
int scrape_incomplete = -1;
int scrape_complete = -1;
int scrape_downloaded = -1;
// the number of times in a row we have failed to announce to this
// tracker.
std::uint8_t fails : 7;
// true while we're waiting for a response from the tracker.
bool updating : 1;
// set to true when we get a valid response from an announce
// with event=started. If it is set, we won't send start in the subsequent
// announces.
bool start_sent : 1;
// set to true when we send a event=completed.
bool complete_sent : 1;
| ||
relevance 0 | ../include/libtorrent/i2p_stream.hpp:539 | make this a string_view |
make this a string_view../include/libtorrent/i2p_stream.hpp:539
char tmp[20];
aux::random_bytes(tmp);
m_session_id.resize(sizeof(tmp)*2);
aux::to_hex(tmp, &m_session_id[0]);
m_sam_socket = std::make_shared<i2p_stream>(m_io_service);
m_sam_socket->set_proxy(m_hostname, m_port);
m_sam_socket->set_command(i2p_stream::cmd_create_session);
m_sam_socket->set_session_id(m_session_id.c_str());
m_sam_socket->set_session_options(session_options);
ADD_OUTSTANDING_ASYNC("i2p_stream::on_sam_connect");
m_sam_socket->async_connect(tcp::endpoint(), wrap_allocator(
[this,s=m_sam_socket](error_code const& ec, Handler hn) {
on_sam_connect(ec, s, std::move(hn));
}, std::move(handler)));
}
void close(error_code&);
char const* session_id() const { return m_session_id.c_str(); }
std::string const& local_endpoint() const { return m_i2p_local_endpoint; }
template <typename Handler>
void async_name_lookup(char const* name, Handler handler)
{
if (m_state == sam_idle && m_name_lookup.empty() && is_open())
do_name_lookup(name, std::move(handler));
else
m_name_lookup.emplace_back(std::string(name)
, std::move(handler));
}
private:
template <typename Handler>
void on_sam_connect(error_code const& ec, std::shared_ptr<i2p_stream>, Handler h)
{
COMPLETE_ASYNC("i2p_stream::on_sam_connect");
m_state = sam_idle;
if (ec)
{
h(ec);
return;
}
do_name_lookup("ME", wrap_allocator(
[this](error_code const& e, char const* dst, Handler hn) {
set_local_endpoint(e, dst, std::move(hn));
}, std::move(h)));
| ||
relevance 0 | ../include/libtorrent/proxy_base.hpp:207 | it would be nice to remember the bind port and bind once we know where the proxy is m_sock.bind(endpoint, ec); |
it would be nice to remember the bind port and bind once we know where the proxy is
m_sock.bind(endpoint, ec);../include/libtorrent/proxy_base.hpp:207
void cancel()
{
m_sock.cancel();
}
void cancel(error_code& ec)
{
m_sock.cancel(ec);
}
void bind(endpoint_type const& /* endpoint */, error_code& /* ec */)
{
// the reason why we ignore binds here is because we don't
// (necessarily) yet know what address family the proxy
// will resolve to, and binding to the wrong one would
// break our connection attempt later. The caller here
// doesn't necessarily know that we're proxying, so this
// bind address is based on the final endpoint, not the
// proxy.
}
#ifndef BOOST_NO_EXCEPTIONS
void open(protocol_type const&)
{
// m_sock.open(p);
}
#endif
void open(protocol_type const&, error_code&)
{
// we need to ignore this for the same reason as stated
// for ignoring bind()
// m_sock.open(p, ec);
}
#ifndef BOOST_NO_EXCEPTIONS
void close()
{
m_remote_endpoint = endpoint_type();
m_sock.close();
m_resolver.cancel();
}
#endif
void close(error_code& ec)
{
m_remote_endpoint = endpoint_type();
m_sock.close(ec);
m_resolver.cancel();
}
| ||
relevance 0 | ../include/libtorrent/socket_type.hpp:60 | move to aux |
move to aux../include/libtorrent/socket_type.hpp:60namespace libtorrent {
// A type describing kinds of sockets involved in various operations or events.
enum class socket_type_t : std::uint8_t {
tcp,
socks5,
http,
utp,
i2p,
tcp_ssl,
socks5_ssl,
http_ssl,
utp_ssl,
#if TORRENT_ABI_VERSION <= 2
udp TORRENT_DEPRECATED_ENUM = utp,
#endif
};
// return a short human readable name for types of socket
char const* socket_type_name(socket_type_t);
}
#endif
| ||
relevance 0 | ../include/libtorrent/upnp.hpp:162 | support using the windows API for UPnP operations as well |
support using the windows API for UPnP operations as well../include/libtorrent/upnp.hpp:162{
bool in_error_code = false;
bool exit = false;
int error_code = -1;
};
struct ip_address_parse_state: error_code_parse_state
{
bool in_ip_address = false;
std::string ip_address;
};
TORRENT_EXTRA_EXPORT void find_control_url(int type, string_view, parse_state& state);
TORRENT_EXTRA_EXPORT void find_error_code(int type, string_view string
, error_code_parse_state& state);
TORRENT_EXTRA_EXPORT void find_ip_address(int type, string_view string
, ip_address_parse_state& state);
struct TORRENT_EXTRA_EXPORT upnp final
: std::enable_shared_from_this<upnp>
, single_threaded
{
upnp(io_context& ios
, aux::session_settings const& settings
, aux::portmap_callback& cb
, address_v4 listen_address
, address_v4 netmask
, std::string listen_device
, aux::listen_socket_handle ls);
~upnp();
void start();
// Attempts to add a port mapping for the specified protocol. Valid protocols are
// ``upnp::tcp`` and ``upnp::udp`` for the UPnP class and ``natpmp::tcp`` and
// ``natpmp::udp`` for the NAT-PMP class.
//
// ``external_port`` is the port on the external address that will be mapped. This
// is a hint, you are not guaranteed that this port will be available, and it may
// end up being something else. In the portmap_alert_ notification, the actual
// external port is reported.
//
// ``local_port`` is the port in the local machine that the mapping should forward
// to.
//
// The return value is an index that identifies this port mapping. This is used
// to refer to mappings that fails or succeeds in the portmap_error_alert_ and
// portmap_alert_ respectively. If The mapping fails immediately, the return value
// is -1, which means failure. There will not be any error alert notification for
| ||
relevance 0 | ../include/libtorrent/hash_picker.hpp:155 | support batched adding of block hashes for reduced overhead? |
support batched adding of block hashes for reduced overhead?../include/libtorrent/hash_picker.hpp:155 // the number of hashes in the range
int count = 0;
int proof_layers = 0;
};
// validates the hash_request, to ensure its invariant as well as matching
// the torrent's file_storage and the number of hashes accompanying the
// request
TORRENT_EXTRA_EXPORT
bool validate_hash_request(hash_request const& hr, file_storage const& fs);
class TORRENT_EXTRA_EXPORT hash_picker
{
public:
hash_picker(file_storage const& files
, aux::vector<aux::merkle_tree, file_index_t>& trees);
hash_request pick_hashes(typed_bitfield<piece_index_t> const& pieces);
add_hashes_result add_hashes(hash_request const& req, span<sha256_hash const> hashes);
set_block_hash_result set_block_hash(piece_index_t piece, int offset, sha256_hash const& h);
void hashes_rejected(hash_request const& req);
void verify_block_hashes(piece_index_t index);
// do we know the piece layer hash for a piece
bool have_hash(piece_index_t index) const;
// do we know all the block hashes for a file?
bool have_all(file_index_t file) const;
bool have_all() const;
bool piece_verified(piece_index_t piece) const;
int piece_layer() const { return m_piece_layer; }
private:
// returns the number of proof layers needed to verify the node's hash
int layers_to_verify(node_index idx) const;
int file_num_layers(file_index_t idx) const;
struct piece_hash_request
{
time_point last_request = min_time();
int num_requests = 0;
bool have = false;
};
struct priority_block_request
{
priority_block_request(file_index_t const f, int const b)
: file(f), block(b) {}
file_index_t file;
int block;
| ||
relevance 0 | ../include/libtorrent/string_view.hpp:40 | replace this by the standard string_view in C++17 |
replace this by the standard string_view in C++17../include/libtorrent/string_view.hpp:40AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TORRENT_STRING_VIEW_HPP_INCLUDED
#define TORRENT_STRING_VIEW_HPP_INCLUDED
#include <boost/version.hpp>
#include "libtorrent/aux_/disable_warnings_push.hpp"
#if BOOST_VERSION < 106100
#include <boost/utility/string_ref.hpp>
#include <cstring> // for strchr
namespace libtorrent {
using string_view = boost::string_ref;
using wstring_view = boost::wstring_ref;
// internal
inline string_view::size_type find_first_of(string_view const v, char const c
, string_view::size_type pos)
{
while (pos < v.size())
{
if (v[pos] == c) return pos;
++pos;
}
return string_view::npos;
}
// internal
inline string_view::size_type find_first_of(string_view const v, char const* c
, string_view::size_type pos)
{
while (pos < v.size())
{
if (std::strchr(c, v[pos]) != nullptr) return pos;
++pos;
}
return string_view::npos;
| ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:218 | make this a raw pointer (to save size in the first cache line) and make the constructor take a raw pointer. torrent objects should always outlive their peers |
make this a raw pointer (to save size in
the first cache line) and make the constructor
take a raw pointer. torrent objects should always
outlive their peers../include/libtorrent/peer_connection.hpp:218 , m_snubbed(false)
, m_interesting(false)
, m_choked(true)
, m_ignore_stats(false)
{}
// explicitly disallow assignment, to silence msvc warning
peer_connection_hot_members& operator=(peer_connection_hot_members const&) = delete;
protected:
// the pieces the other end have
typed_bitfield<piece_index_t> m_have_piece;
// this is the torrent this connection is
// associated with. If the connection is an
// incoming connection, this is set to zero
// until the info_hash is received. Then it's
// set to the torrent it belongs to.
std::weak_ptr<torrent> m_torrent;
public:
// a back reference to the session
// the peer belongs to.
aux::session_interface& m_ses;
// settings that apply to this peer
aux::session_settings const& m_settings;
protected:
// this is true if this connection has been added
// to the list of connections that will be closed.
bool m_disconnecting:1;
// this is true until this socket has become
// writable for the first time (i.e. the
// connection completed). While connecting
// the timeout will not be triggered. This is
// because windows XP SP2 may delay connection
// attempts, which means that the connection
// may not even have been attempted when the
// time out is reached.
bool m_connecting:1;
// this is set to true if the last time we tried to
// pick a piece to download, we could only find
// blocks that were already requested from other
// peers. In this case, we should not try to pick
| ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:1026 | factor this out into its own class with a virtual interface torrent and session should implement this interface |
factor this out into its own class with a virtual interface
torrent and session should implement this interface../include/libtorrent/peer_connection.hpp:1026
// the local endpoint for this peer, i.e. our address
// and our port. If this is set for outgoing connections
// before the connection completes, it means we want to
// force the connection to be bound to the specified interface.
// if it ends up being bound to a different local IP, the connection
// is closed.
tcp::endpoint m_local;
// remote peer's id
peer_id m_peer_id;
protected:
template <typename Fun, typename... Args>
void wrap(Fun f, Args&&... a);
// statistics about upload and download speeds
// and total amount of uploads and downloads for
// this peer
stat m_statistics;
// the number of outstanding bytes expected
// to be received by extensions
int m_extension_outstanding_bytes = 0;
// the number of time critical requests
// queued up in the m_request_queue that
// soon will be committed to the download
// queue. This is included in download_queue_time()
// so that it can be used while adding more
// requests and take the previous requests
// into account without submitting it all
// immediately
std::uint16_t m_queued_time_critical = 0;
// the number of bytes we are currently reading
// from disk, that will be added to the send
// buffer as soon as they complete
int m_reading_bytes = 0;
// options used for the piece picker. These flags will
// be augmented with flags controlled by other settings
// like sequential download etc. These are here to
// let plugins control flags that should always be set
picker_options_t m_picker_options{};
// the number of invalid piece-requests
// we have got from this peer. If the request
// queue gets empty, and there have been
// invalid requests, we can assume the
| ||
relevance 0 | ../include/libtorrent/identify_client.hpp:48 | hide this declaration when deprecated functions are disabled, and remove its internal use |
hide this declaration when deprecated functions are disabled, and
remove its internal use../include/libtorrent/identify_client.hpp:48CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TORRENT_IDENTIFY_CLIENT_HPP_INCLUDED
#define TORRENT_IDENTIFY_CLIENT_HPP_INCLUDED
#include "libtorrent/config.hpp"
#if TORRENT_ABI_VERSION == 1
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <boost/optional.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#endif
#include "libtorrent/peer_id.hpp"
#include "libtorrent/fingerprint.hpp"
namespace libtorrent {
namespace aux {
TORRENT_EXTRA_EXPORT
std::string identify_client_impl(const peer_id& p);
}
// these functions don't really need to be public. This mechanism of
// advertising client software and version is also out-dated.
// This function can can be used to extract a string describing a client
// version from its peer-id. It will recognize most clients that have this
// kind of identification in the peer-id.
TORRENT_DEPRECATED_EXPORT
std::string identify_client(const peer_id& p);
#if TORRENT_ABI_VERSION == 1
#include "libtorrent/aux_/disable_deprecation_warnings_push.hpp"
// Returns an optional fingerprint if any can be identified from the peer
// id. This can be used to automate the identification of clients. It will
// not be able to identify peers with non- standard encodings. Only Azureus
// style, Shadow's style and Mainline style.
TORRENT_DEPRECATED_EXPORT
boost::optional<fingerprint>
client_fingerprint(peer_id const& p);
#include "libtorrent/aux_/disable_warnings_pop.hpp"
| ||
relevance 0 | ../include/libtorrent/socks5_stream.hpp:197 | we could bind the socket here, since we know what the target endpoint is of the proxy |
we could bind the socket here, since we know what the
target endpoint is of the proxy../include/libtorrent/socks5_stream.hpp:197 }, std::move(handler)));
}
private:
template <typename Handler>
void name_lookup(error_code const& e, tcp::resolver::results_type ips
, Handler h)
{
COMPLETE_ASYNC("socks5_stream::name_lookup");
if (handle_error(e, std::move(h))) return;
auto i = ips.begin();
if (!m_sock.is_open())
{
error_code ec;
m_sock.open(i->endpoint().protocol(), ec);
if (handle_error(ec, std::move(h))) return;
}
ADD_OUTSTANDING_ASYNC("socks5_stream::connected");
m_sock.async_connect(i->endpoint(), wrap_allocator(
[this](error_code const& ec, Handler hn)
{ connected(ec, std::move(hn)); }, std::move(h)));
}
template <typename Handler>
void connected(error_code const& e, Handler h)
{
COMPLETE_ASYNC("socks5_stream::connected");
if (handle_error(e, std::move(h))) return;
using namespace libtorrent::aux;
if (m_version == 5)
{
// send SOCKS5 authentication methods
m_buffer.resize(m_user.empty()?3:4);
char* p = &m_buffer[0];
write_uint8(5, p); // SOCKS VERSION 5
if (m_user.empty())
{
write_uint8(1, p); // 1 authentication method (no auth)
write_uint8(0, p); // no authentication
}
else
{
write_uint8(2, p); // 2 authentication methods
write_uint8(0, p); // no authentication
write_uint8(2, p); // username/password
}
ADD_OUTSTANDING_ASYNC("socks5_stream::handshake1");
| ||
relevance 0 | ../include/libtorrent/torrent_info.hpp:729 | change the type to std::shared_ptr in C++17 it is used as if immutable, it cannot be const for technical reasons right now. |
change the type to std::shared_ptr in C++17
it is used as if immutable, it cannot be const for technical reasons
right now.../include/libtorrent/torrent_info.hpp:729
// these are the collections from outside of the info-dict. These are
// owning strings, since we only keep the info-section around, these
// cannot be pointers into that buffer.
std::vector<std::string> m_owned_collections;
#if TORRENT_ABI_VERSION <= 2
// if this is a merkle torrent, this is the merkle
// tree. It has space for merkle_num_nodes(merkle_num_leafs(num_pieces))
// hashes
aux::vector<sha1_hash> m_merkle_tree;
#endif
// v2 merkle tree for each file
// the actual hash buffers are always divisible by 32 (sha256_hash::size())
aux::vector<aux::vector<char>, file_index_t> m_piece_layers;
// this is a copy of the info section from the torrent.
// it is maintained in this flat format in order to
// make it available through the metadata extension
boost::shared_array<char> m_info_section;
// if a comment is found in the torrent file
// this will be set to that comment
std::string m_comment;
// an optional string naming the software used
// to create the torrent file
std::string m_created_by;
// the info section parsed. points into m_info_section
// parsed lazily
mutable bdecode_node m_info_dict;
// if a creation date is found in the torrent file
// this will be set to that, otherwise it'll be
// 1970, Jan 1
std::time_t m_creation_date = 0;
// the hash(es) that identify this torrent
info_hash_t m_info_hash;
// this is the offset into the m_info_section buffer to the first byte of
// the first SHA-1 hash
std::int32_t m_piece_hashes = 0;
// the number of bytes in m_info_section
std::int32_t m_info_section_size = 0;
// this is used when creating a torrent. If there's
// only one file there are cases where it's impossible
| ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:485 | some space could be saved here by making gauges 32 bits |
some space could be saved here by making gauges 32 bits../include/libtorrent/performance_counters.hpp:485 | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:486 | restore these to regular integers. Instead have one copy of the counters per thread and collect them at convenient synchronization points |
restore these to regular integers. Instead have one copy
of the counters per thread and collect them at convenient
synchronization points../include/libtorrent/performance_counters.hpp:486#ifdef ATOMIC_LLONG_LOCK_FREE
#define TORRENT_COUNTER_NOEXCEPT noexcept
#else
#define TORRENT_COUNTER_NOEXCEPT
#endif
counters() TORRENT_COUNTER_NOEXCEPT;
counters(counters const&) TORRENT_COUNTER_NOEXCEPT;
counters& operator=(counters const&) & TORRENT_COUNTER_NOEXCEPT;
// returns the new value
std::int64_t inc_stats_counter(int c, std::int64_t value = 1) TORRENT_COUNTER_NOEXCEPT;
std::int64_t operator[](int i) const TORRENT_COUNTER_NOEXCEPT;
void set_value(int c, std::int64_t value) TORRENT_COUNTER_NOEXCEPT;
void blend_stats_counter(int c, std::int64_t value, int ratio) TORRENT_COUNTER_NOEXCEPT;
private:
#ifdef ATOMIC_LLONG_LOCK_FREE
aux::array<std::atomic<std::int64_t>, num_counters> m_stats_counter;
#else
// if the atomic type isn't lock-free, use a single lock instead, for
// the whole array
mutable std::mutex m_mutex;
aux::array<std::int64_t, num_counters> m_stats_counter;
#endif
};
}
#endif
| ||
relevance 0 | ../include/libtorrent/kademlia/msg.hpp:87 | move this to its own .hpp/.cpp pair? |
move this to its own .hpp/.cpp pair?../include/libtorrent/kademlia/msg.hpp:87 int flags;
enum {
// this argument is optional, parsing will not
// fail if it's not present
optional = 1,
// for dictionaries, the following entries refer
// to child nodes to this node, up until and including
// the next item that has the last_child flag set.
// these flags are nestable
parse_children = 2,
// this is the last item in a child dictionary
last_child = 4,
// the size argument refers to that the size
// has to be divisible by the number, instead
// of having that exact size
size_divisible = 8
};
};
TORRENT_EXTRA_EXPORT bool verify_message_impl(bdecode_node const& message, span<key_desc_t const> desc
, span<bdecode_node> ret, span<char> error);
// verifies that a message has all the required
// entries and returns them in ret
template <int Size>
bool verify_message(bdecode_node const& msg, key_desc_t const (&desc)[Size]
, bdecode_node (&ret)[Size], span<char> error)
{
return verify_message_impl(msg, desc, ret, error);
}
}
}
#endif
| ||
relevance 0 | ../include/libtorrent/kademlia/item.hpp:61 | since this is a public function, it should probably be moved out of this header and into one with other public functions. |
since this is a public function, it should probably be moved
out of this header and into one with other public functions.../include/libtorrent/kademlia/item.hpp:61#include <libtorrent/span.hpp>
#include <libtorrent/kademlia/types.hpp>
namespace libtorrent {
namespace dht {
// calculate the target hash for an immutable item.
TORRENT_EXTRA_EXPORT sha1_hash item_target_id(span<char const> v);
// calculate the target hash for a mutable item.
TORRENT_EXTRA_EXPORT sha1_hash item_target_id(span<char const> salt
, public_key const& pk);
TORRENT_EXTRA_EXPORT bool verify_mutable_item(
span<char const> v
, span<char const> salt
, sequence_number seq
, public_key const& pk
, signature const& sig);
// given a byte range ``v`` and an optional byte range ``salt``, a
// sequence number, public key ``pk`` (must be 32 bytes) and a secret key
// ``sk`` (must be 64 bytes), this function produces a signature which
// is written into a 64 byte buffer pointed to by ``sig``. The caller
// is responsible for allocating the destination buffer that's passed in
// as the ``sig`` argument. Typically it would be allocated on the stack.
TORRENT_EXPORT signature sign_mutable_item(
span<char const> v
, span<char const> salt
, sequence_number seq
, public_key const& pk
, secret_key const& sk);
class TORRENT_EXTRA_EXPORT item
{
public:
item() {}
item(public_key const& pk, span<char const> salt);
explicit item(entry v);
item(entry v
, span<char const> salt
, sequence_number seq
, public_key const& pk
, secret_key const& sk);
explicit item(bdecode_node const& v);
void assign(entry v);
void assign(entry v, span<char const> salt
, sequence_number seq
, public_key const& pk
| ||
relevance 0 | ../include/libtorrent/aux_/deprecated.hpp:47 | figure out which version of clang this is supported in |
figure out which version of clang this is supported in../include/libtorrent/aux_/deprecated.hpp:47CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TORRENT_DEPRECATED_HPP_INCLUDED
#define TORRENT_DEPRECATED_HPP_INCLUDED
#if !defined TORRENT_BUILDING_LIBRARY
# define TORRENT_DEPRECATED [[deprecated]]
#else
# define TORRENT_DEPRECATED
#endif
#if defined __clang__
// ====== CLANG ========
# if !defined TORRENT_BUILDING_LIBRARY
# define TORRENT_DEPRECATED_ENUM __attribute__ ((deprecated))
# endif
#elif defined __GNUC__
// ======== GCC ========
// deprecation markup is only enabled when libtorrent
// headers are included by clients, not while building
// libtorrent itself
# if __GNUC__ >= 6 && !defined TORRENT_BUILDING_LIBRARY
# define TORRENT_DEPRECATED_ENUM __attribute__ ((deprecated))
# endif
#endif
#ifndef TORRENT_DEPRECATED_ENUM
#define TORRENT_DEPRECATED_ENUM
#endif
#endif
| ||
relevance 0 | ../include/libtorrent/aux_/session_interface.hpp:212 | it would be nice to not have this be part of session_interface |
it would be nice to not have this be part of session_interface../include/libtorrent/aux_/session_interface.hpp:212 virtual void deferred_submit_jobs() = 0;
virtual std::uint16_t listen_port() const = 0;
virtual std::uint16_t ssl_listen_port() const = 0;
virtual int listen_port(aux::transport ssl, address const& local_addr) = 0;
virtual void for_each_listen_socket(std::function<void(aux::listen_socket_handle const&)> f) = 0;
// ask for which interface and port to bind outgoing peer connections on
virtual tcp::endpoint bind_outgoing_socket(socket_type& s, address const&
remote_address, error_code& ec) const = 0;
virtual bool verify_bound_address(address const& addr, bool utp
, error_code& ec) = 0;
#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
virtual std::vector<std::shared_ptr<torrent>> find_collection(
std::string const& collection) const = 0;
#endif
virtual proxy_settings proxy() const = 0;
#if TORRENT_USE_I2P
virtual char const* i2p_session() const = 0;
virtual std::string const& local_i2p_endpoint() const = 0;
#endif
virtual void prioritize_connections(std::weak_ptr<torrent> t) = 0;
virtual void trigger_auto_manage() = 0;
virtual void apply_settings_pack(std::shared_ptr<settings_pack> pack) = 0;
virtual session_settings const& settings() const = 0;
virtual void queue_tracker_request(tracker_request req
, std::weak_ptr<request_callback> c) = 0;
// peer-classes
virtual void set_peer_classes(peer_class_set* s, address const& a, socket_type_t st) = 0;
virtual peer_class_pool const& peer_classes() const = 0;
virtual peer_class_pool& peer_classes() = 0;
virtual bool ignore_unchoke_slots_set(peer_class_set const& set) const = 0;
virtual int copy_pertinent_channels(peer_class_set const& set
, int channel, bandwidth_channel** dst, int m) = 0;
virtual int use_quota_overhead(peer_class_set& set, int amount_down, int amount_up) = 0;
virtual bandwidth_manager* get_bandwidth_manager(int channel) = 0;
virtual void sent_bytes(int bytes_payload, int bytes_protocol) = 0;
virtual void received_bytes(int bytes_payload, int bytes_protocol) = 0;
virtual void trancieve_ip_packet(int bytes, bool ipv6) = 0;
| ||
relevance 0 | ../include/libtorrent/aux_/announce_entry.hpp:74 | include the number of peers received from this tracker, at the last announce |
include the number of peers received from this tracker, at the last
announce../include/libtorrent/aux_/announce_entry.hpp:74namespace aux {
struct TORRENT_EXTRA_EXPORT announce_infohash
{
announce_infohash();
// if this tracker has returned an error or warning message
// that message is stored here
std::string message;
// if this tracker failed the last time it was contacted
// this error code specifies what error occurred
error_code last_error;
// the time of next tracker announce
time_point32 next_announce = (time_point32::min)();
// no announces before this time
time_point32 min_announce = (time_point32::min)();
// these are either -1 or the scrape information this tracker last
// responded with. *incomplete* is the current number of downloaders in
// the swarm, *complete* is the current number of seeds in the swarm and
// *downloaded* is the cumulative number of completed downloads of this
// torrent, since the beginning of time (from this tracker's point of
// view).
// if this tracker has returned scrape data, these fields are filled in
// with valid numbers. Otherwise they are set to -1. ``incomplete`` counts
// the number of current downloaders. ``complete`` counts the number of
// current peers that completed the download, or "seeds". ``downloaded`` is the
// cumulative number of completed downloads.
int scrape_incomplete = -1;
int scrape_complete = -1;
int scrape_downloaded = -1;
// the number of times in a row we have failed to announce to this
// tracker.
std::uint8_t fails : 7;
// true while we're waiting for a response from the tracker.
bool updating : 1;
// set to true when we get a valid response from an announce
// with event=started. If it is set, we won't send start in the subsequent
// announces.
bool start_sent : 1;
// set to true when we send a event=completed.
bool complete_sent : 1;
| ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:265 | make these direct members and generate shared_ptrs to them which alias the listen_socket_t shared_ptr |
make these direct members and generate shared_ptrs to them
which alias the listen_socket_t shared_ptr../include/libtorrent/aux_/session_impl.hpp:265 if (udp_sock) return udp_sock->sock.local_port();
return 0;
}
// 0 is natpmp 1 is upnp
// the order of these arrays determines the priority in
// which their ports will be announced to peers
aux::array<listen_port_mapping, 2, portmap_transport> tcp_port_mapping;
aux::array<listen_port_mapping, 2, portmap_transport> udp_port_mapping;
// indicates whether this is an SSL listen socket or not
transport ssl = transport::plaintext;
listen_socket_flags_t flags = accept_incoming;
// the actual sockets (TCP listen socket and UDP socket)
// An entry does not necessarily have a UDP or TCP socket. One of these
// pointers may be nullptr!
// These must be shared_ptr to avoid a dangling reference if an
// incoming packet is in the event queue when the socket is erased
std::shared_ptr<tcp::acceptor> sock;
std::shared_ptr<aux::session_udp_socket> udp_sock;
// since udp packets are expected to be dispatched frequently, this saves
// time on handler allocation every time we read again.
aux::handler_storage<aux::udp_handler_max_size, aux::udp_handler> udp_handler_storage;
std::shared_ptr<natpmp> natpmp_mapper;
std::shared_ptr<upnp> upnp_mapper;
std::shared_ptr<struct lsd> lsd;
// set to true when we receive an incoming connection from this listen
// socket
bool incoming_connection = false;
};
struct TORRENT_EXTRA_EXPORT listen_endpoint_t
{
listen_endpoint_t(address const& adr, int p, std::string dev, transport s
, listen_socket_flags_t f, address const& nmask = address{})
: addr(adr), netmask(nmask), port(p), device(std::move(dev)), ssl(s), flags(f) {}
bool operator==(listen_endpoint_t const& o) const
{
return addr == o.addr
&& port == o.port
&& device == o.device
&& ssl == o.ssl
&& flags == o.flags;
}
| ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:1068 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:1068#ifdef TORRENT_SSL_PEERS
void on_incoming_utp_ssl(socket_type s);
void ssl_handshake(error_code const& ec, socket_type* s);
#endif
// round-robin index into m_outgoing_interfaces
mutable std::uint8_t m_interface_index = 0;
std::shared_ptr<listen_socket_t> setup_listener(
listen_endpoint_t const& lep, error_code& ec);
#ifndef TORRENT_DISABLE_DHT
dht::dht_state m_dht_state;
#endif
// this is initialized to the unchoke_interval
// session_setting and decreased every second.
// when it reaches zero, it is reset to the
// unchoke_interval and the unchoke set is
// recomputed.
int m_unchoke_time_scaler = 0;
// this is used to decide when to recalculate which
// torrents to keep queued and which to activate
| ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:1073 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:1073
// round-robin index into m_outgoing_interfaces
mutable std::uint8_t m_interface_index = 0;
std::shared_ptr<listen_socket_t> setup_listener(
listen_endpoint_t const& lep, error_code& ec);
#ifndef TORRENT_DISABLE_DHT
dht::dht_state m_dht_state;
#endif
// this is initialized to the unchoke_interval
// session_setting and decreased every second.
// when it reaches zero, it is reset to the
// unchoke_interval and the unchoke set is
// recomputed.
int m_unchoke_time_scaler = 0;
// this is used to decide when to recalculate which
// torrents to keep queued and which to activate
int m_auto_manage_time_scaler = 0;
// works like unchoke_time_scaler but it
// is only decreased when the unchoke set
// is recomputed, and when it reaches zero,
// the optimistic unchoke is moved to another peer.
| ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:1080 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:1080
#ifndef TORRENT_DISABLE_DHT
dht::dht_state m_dht_state;
#endif
// this is initialized to the unchoke_interval
// session_setting and decreased every second.
// when it reaches zero, it is reset to the
// unchoke_interval and the unchoke set is
// recomputed.
int m_unchoke_time_scaler = 0;
// this is used to decide when to recalculate which
// torrents to keep queued and which to activate
int m_auto_manage_time_scaler = 0;
// works like unchoke_time_scaler but it
// is only decreased when the unchoke set
// is recomputed, and when it reaches zero,
// the optimistic unchoke is moved to another peer.
int m_optimistic_unchoke_time_scaler = 0;
// works like unchoke_time_scaler. Each time
// it reaches 0, and all the connections are
// used, the worst connection will be disconnected
// from the torrent with the most peers
int m_disconnect_time_scaler = 90;
// when this scaler reaches zero, it will
// scrape one of the auto managed, paused,
// torrents.
int m_auto_scrape_time_scaler = 180;
// statistics gathered from all torrents.
stat m_stat;
// implements session_interface
void sent_bytes(int bytes_payload, int bytes_protocol) override;
void received_bytes(int bytes_payload, int bytes_protocol) override;
void trancieve_ip_packet(int bytes, bool ipv6) override;
void sent_syn(bool ipv6) override;
void received_synack(bool ipv6) override;
#if TORRENT_ABI_VERSION == 1
int m_peak_up_rate = 0;
#endif
void on_tick(error_code const& e);
void try_connect_more_peers();
void auto_manage_checking_torrents(std::vector<torrent*>& list
| ||
relevance 0 | ../include/libtorrent/aux_/pool.hpp:49 | ensure the alignment is good here |
ensure the alignment is good here../include/libtorrent/aux_/pool.hpp:49POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TORRENT_POOL_HPP
#define TORRENT_POOL_HPP
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <boost/pool/pool.hpp>
#include <boost/pool/object_pool.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
namespace libtorrent {
namespace aux {
struct allocator_new_delete
{
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
static char* malloc(size_type const bytes)
{ return new char[bytes]; }
static void free(char* const block)
{ delete [] block; }
};
using pool = boost::pool<allocator_new_delete>;
template <typename T>
using object_pool = boost::object_pool<T, allocator_new_delete>;
}
}
#endif
| ||
relevance 0 | ../include/libtorrent/aux_/allocating_handler.hpp:317 | in C++17, Handler and Storage could just use "auto" |
in C++17, Handler and Storage could just use "auto"../include/libtorrent/aux_/allocating_handler.hpp:317
private:
Handler handler;
handler_storage<Size, Name>* storage;
#ifndef BOOST_NO_EXCEPTIONS
error_handler_interface* error_handler;
#endif
};
template <class Handler, size_t Size, HandlerName Name>
aux::allocating_handler<Handler, Size, Name>
make_handler(Handler handler
, handler_storage<Size, Name>& storage
, error_handler_interface& err_handler)
{
return aux::allocating_handler<Handler, Size, Name>(
std::forward<Handler>(handler), &storage, &err_handler);
}
template <typename T
, typename HandlerType
, HandlerType Handler
, void (T::*ErrorHandler)(error_code const&)
, void (T::*ExceptHandler)(std::exception const&)
, typename StorageType
, StorageType T::* Storage>
struct handler
{
explicit handler(std::shared_ptr<T> p) : ptr_(std::move(p)) {}
std::shared_ptr<T> ptr_;
template <class... A>
void operator()(A&&... a)
{
#ifdef BOOST_NO_EXCEPTIONS
(ptr_.get()->*Handler)(std::forward<A>(a)...);
#else
try
{
(ptr_.get()->*Handler)(std::forward<A>(a)...);
}
catch (system_error const& e)
{
(ptr_.get()->*ErrorHandler)(e.code());
}
catch (std::exception const& e)
{
(ptr_.get()->*ExceptHandler)(e);
}
| ||
relevance 0 | ../include/libtorrent/aux_/merkle_tree.hpp:85 | remove this constructor. Don't support "uninitialized" trees. This also requires not constructing these for pad-files and small files. So, a sparse hash list in torrent_info |
remove this constructor. Don't support "uninitialized" trees. This
also requires not constructing these for pad-files and small files.
So, a sparse hash list in torrent_info../include/libtorrent/aux_/merkle_tree.hpp:85// The invariant of the tree is that all interior nodes (i.e. all but the very
// bottom leaf nodes, representing block hashes) are either set and valid, or
// clear. No invalid hashes are allowed, and they can only be added by also
// providing proof of being valid.
// The leaf blocks on the other hand, MAY be invalid. For instance, when adding
// a magnet link for a torrent that we already have files for. Once we have the
// metadata, we have files on disk but no hashes. We won't know whether the data
// on disk is valid or not, until we've downloaded the hashes to validate them.
// Idea for future space optimization:
// while downloading, we need to store interior nodes of this tree. However, we
// don't need to store the padding. a SHA-256 is 32 bytes. Instead of storing
// the full (padded) tree of SHA-256 hashes, store the full tree of 32 bit
// signed integers, being indices into the actual storage for the tree. We could
// even grow the storage lazily. Instead of storing the padding hashes, use
// negative indices to refer to fixed SHA-256(0), and SHA-256(SHA-256(0)) and so
// on
struct TORRENT_EXTRA_EXPORT merkle_tree
{
merkle_tree() = default;
merkle_tree(int num_blocks, int blocks_per_piece, char const* r);
sha256_hash root() const;
void load_tree(span<sha256_hash const> t, std::vector<bool> const& verified);
void load_sparse_tree(span<sha256_hash const> t, std::vector<bool> const& mask
, std::vector<bool> const& verified);
void load_verified_bits(std::vector<bool> const& verified);
std::size_t size() const;
int end_index() const { return int(size()); }
bool has_node(int idx) const;
bool compare_node(int idx, sha256_hash const& h) const;
sha256_hash operator[](int idx) const;
std::vector<sha256_hash> build_vector() const;
std::pair<std::vector<sha256_hash>, aux::vector<bool>> build_sparse_vector() const;
// get bits indicating if each leaf hash is verified
std::vector<bool> verified_leafs() const;
// returns true if the entire tree is known and verified
bool is_complete() const;
// returns true if all block hashes in the specified range have been verified
bool blocks_verified(int block_idx, int num_blocks) const;
| ||
relevance 0 | ../include/libtorrent/aux_/merkle_tree.hpp:175 | make this a std::unique_ptr |
make this a std::unique_ptr../include/libtorrent/aux_/merkle_tree.hpp:175
int blocks_per_piece() const { return 1 << m_blocks_per_piece_log; }
// the number tree levels per piece. This is 0 if the block layer is also
// the piece layer.
int piece_levels() const { return m_blocks_per_piece_log; }
int block_layer_start() const;
int piece_layer_start() const;
int num_pieces() const;
int num_leafs() const;
void optimize_storage();
void optimize_storage_piece_layer();
void allocate_full();
// a pointer to the root hash for this file.
char const* m_root = nullptr;
// this is either the full tree, or some sparse representation of it,
// depending on m_mode
aux::vector<sha256_hash> m_tree;
// when the full tree is allocated, this has one bit for each block hash. a
// 1 means we have verified the block hash to be correct, otherwise the block
// hash may represent what's on disk, but we haven't been able to verify it
// yet
bitfield m_block_verified;
// number of blocks in the file this tree represents. The number of leafs in
// the tree is rounded up to an even power of 2.
int m_num_blocks = 0;
// the number of blocks per piece, specified as how many steps to shift
// right 1 to get the number of blocks in one piece. This is a compact
// representation that's valid because pieces are always powers of 2.
// this is necessary to know which layer in the tree the piece layer is.
std::uint8_t m_blocks_per_piece_log = 0;
enum class mode_t : std::uint8_t
{
// a default constructed tree is truly empty. It does not even have a
// root hash
uninitialized_tree,
// we don't have any hashes in this tree. m_tree should be empty
// an empty tree still always has the root hash (available as root())
empty_tree,
// in this mode, m_tree represents the full tree, including padding.
full_tree,
| ||
relevance 0 | ../include/libtorrent/aux_/utp_stream.hpp:693 | it would be nice to make this private |
it would be nice to make this private../include/libtorrent/aux_/utp_stream.hpp:693 bool consume_incoming_data(
utp_header const* ph, std::uint8_t const* ptr, int payload_size, time_point now);
void update_mtu_limits();
void experienced_loss(std::uint32_t seq_nr, time_point now);
void send_deferred_ack();
void socket_drained();
void set_userdata(utp_stream* s) { m_userdata = s; }
void abort();
udp::endpoint remote_endpoint() const;
std::uint16_t receive_id() const { return m_recv_id; }
bool match(udp::endpoint const& ep, std::uint16_t id) const;
// non-copyable
utp_socket_impl(utp_socket_impl const&) = delete;
utp_socket_impl const& operator=(utp_socket_impl const&) = delete;
// The underlying UDP socket this uTP socket is bound to
std::weak_ptr<utp_socket_interface> m_sock;
void add_write_buffer(void const* buf, int len);
void add_read_buffer(void* buf, int len);
int send_delay() const { return m_send_delay; }
int recv_delay() const { return m_recv_delay; }
void issue_read();
void issue_write();
void subscribe_writeable();
bool check_fin_sent() const;
void do_connect(tcp::endpoint const& ep);
std::size_t read_some(bool const clear_buffers, error_code& ec);
std::size_t write_some(bool const clear_buffers); // Warning: non-blocking
int receive_buffer_size() const { return m_receive_buffer_size; }
bool null_buffers() const { return m_null_buffers; }
private:
// it's important that these match the enums in performance_counters for
// num_utp_idle etc.
enum class state_t {
// not yet connected
none,
// sent a syn packet, not received any acks
syn_sent,
| ||