2017-07-14 14:41:49 -04:00
|
|
|
# frozen_string_literal: true
|
|
|
|
|
2018-02-24 13:16:11 -05:00
|
|
|
require 'ipaddr'
|
|
|
|
require 'socket'
|
2018-11-22 14:12:04 -05:00
|
|
|
require 'resolv'
|
2018-02-24 13:16:11 -05:00
|
|
|
|
2018-11-27 13:46:05 -05:00
|
|
|
# Monkey-patch HTTP.rb's per-operation timeout so that no timeout block is
# wrapped around Socket#open: our own Request::Socket.open implements its
# connect timeout internally (via non-blocking connect + IO.select), so the
# gem's outer timeout would be redundant.
class HTTP::Timeout::PerOperation
  # Opens the TCP connection without the gem's connect-timeout wrapper.
  # Mirrors the upstream signature: socket_class, host, port, nodelay flag.
  def connect(socket_class, host, port, nodelay = false)
    @socket = socket_class.open(host, port)
    @socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1) if nodelay
  end
end
|
|
|
|
|
2017-07-14 14:41:49 -04:00
|
|
|
class Request
|
|
|
|
# Pseudo-header used when building the HTTP signature; it is signed but
# stripped from the headers actually sent on the wire (see #headers).
REQUEST_TARGET = '(request-target)'
|
|
|
|
|
2019-07-01 18:34:38 -04:00
|
|
|
# We enforce a 5s timeout on DNS resolving, 5s timeout on socket opening
|
|
|
|
# and 5s timeout on the TLS handshake, meaning the worst case should take
|
|
|
|
# about 15s in total
|
|
|
|
# Per-operation timeouts (seconds) passed to HTTP.rb; :connect is also
# reused by Socket.open's IO.select loop when waiting on pending sockets.
TIMEOUT = { connect: 5, read: 10, write: 10 }.freeze
|
|
|
|
|
2017-07-14 14:41:49 -04:00
|
|
|
include RoutingHelper
|
|
|
|
|
2017-12-06 05:41:57 -05:00
|
|
|
# Builds a request.
#
# verb    - HTTP method as a symbol (:get, :post, ...)
# url     - target URL; raises ArgumentError when blank
# options - :http_client (reusable client), :allow_local (permit private
#           addresses), :body, and any other HTTP.rb options
#
# Raises Mastodon::HostValidationError when the target is a hidden service
# and the instance is configured to block those.
def initialize(verb, url, **options)
  raise ArgumentError if url.blank?

  @verb = verb
  @url  = Addressable::URI.parse(url).normalize

  @http_client = options.delete(:http_client)
  @allow_local = options.delete(:allow_local)

  # ProxySocket skips the private-address check, which is wanted both when
  # going through a proxy and when local connections are explicitly allowed.
  socket_class = use_proxy? || @allow_local ? ProxySocket : Socket
  @options     = options.merge(socket_class: socket_class)
  @options     = @options.merge(proxy_url) if use_proxy?
  @headers     = {}

  raise Mastodon::HostValidationError, 'Instance does not support hidden service connections' if block_hidden_service?

  set_common_headers!
  set_digest! if options.key?(:body)
end
|
|
|
|
|
2022-09-21 16:45:57 -04:00
|
|
|
# Configures the request to be signed on behalf of +actor+.
#
# actor     - account-like object; must not be nil. Its keypair is used for
#             signing unless +sign_with+ is given.
# sign_with - optional PEM-encoded RSA key used instead of the actor's.
#
# Returns self, so the call can be chained before #perform.
def on_behalf_of(actor, sign_with: nil)
  raise ArgumentError, 'actor must not be nil' if actor.nil?

  @actor   = actor
  @keypair = if sign_with.present?
               OpenSSL::PKey::RSA.new(sign_with)
             else
               @actor.keypair
             end

  self
end
|
|
|
|
|
|
|
|
# Merges +new_headers+ into the request headers (later values win).
# Returns self for chaining.
def add_headers(new_headers)
  @headers.update(new_headers)
  self
end
|
|
|
|
|
|
|
|
# Executes the request, yielding the response when a block is given.
# Any error raised while sending is re-raised as the same class with the
# target URL appended to the message, which makes failures traceable.
def perform
  begin
    res = http_client.public_send(@verb, @url.to_s, @options.merge(headers: headers))
  rescue => e
    raise e.class, "#{e.message} on #{@url}", e.backtrace[0]
  end

  begin
    # A persistent connection can only be reused after the current response
    # has been fully read. Reading through #body_with_limit keeps a
    # malicious, overly large body from exhausting memory, unlike a plain
    # #to_s or #flush.
    res.body_with_limit if http_client.persistent?

    yield res if block_given?
  ensure
    # Non-persistent clients are done after one request; persistent ones
    # stay open for reuse.
    http_client.close unless http_client.persistent?
  end
end
|
|
|
|
|
|
|
|
def headers
|
2022-09-21 16:45:57 -04:00
|
|
|
(@actor ? @headers.merge('Signature' => signature) : @headers).without(REQUEST_TARGET)
|
2017-07-14 14:41:49 -04:00
|
|
|
end
|
|
|
|
|
2018-12-26 13:15:53 -05:00
|
|
|
class << self
  # Returns true when +url+ parses as an http(s) URL with a host,
  # false for anything unparsable or schemeless/hostless.
  def valid_url?(url)
    parsed_url = Addressable::URI.parse(url)
    %w(http https).include?(parsed_url.scheme) && parsed_url.host.present?
  rescue Addressable::URI::InvalidURIError
    false
  end

  # Fresh HTTP.rb client with gzip inflation, the standard timeouts and a
  # redirect limit of 3 hops.
  def http_client
    HTTP.use(:auto_inflate).timeout(TIMEOUT.dup).follow(max_hops: 3)
  end
end
|
|
|
|
|
2017-07-14 14:41:49 -04:00
|
|
|
private
|
|
|
|
|
|
|
|
# Fills in the headers every outgoing request carries. Insertion order
# matters: signed_headers/signed_string iterate @headers in this order when
# building the HTTP signature.
def set_common_headers!
  common = {
    REQUEST_TARGET => "#{@verb} #{@url.path}",
    'User-Agent'   => Mastodon::Version.user_agent,
    'Host'         => @url.host,
    'Date'         => Time.now.utc.httpdate,
  }
  # HEAD responses have no body, so advertising gzip is pointless there.
  common['Accept-Encoding'] = 'gzip' unless @verb == :head
  @headers.merge!(common)
end
|
|
|
|
|
2017-08-09 17:54:14 -04:00
|
|
|
# Adds the Digest header (RFC 3230 style) over the request body so the
# receiver can verify body integrity as part of the HTTP signature.
def set_digest!
  digest = Digest::SHA256.base64digest(@options[:body])
  @headers['Digest'] = "SHA-256=#{digest}"
end
|
|
|
|
|
2017-07-14 14:41:49 -04:00
|
|
|
# Builds the Signature header value: the signed_string is signed with the
# actor's RSA key over SHA-256, and packaged together with the key id and
# the (lowercased) list of header names that were covered by the signature.
def signature
  value = Base64.strict_encode64(@keypair.sign(OpenSSL::Digest.new('SHA256'), signed_string))

  %(keyId="#{key_id}",algorithm="rsa-sha256",headers="#{signed_headers.keys.join(' ').downcase}",signature="#{value}")
end
|
|
|
|
|
|
|
|
def signed_string
|
2018-08-30 22:22:52 -04:00
|
|
|
signed_headers.map { |key, value| "#{key.downcase}: #{value}" }.join("\n")
|
2017-07-14 14:41:49 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
def signed_headers
|
2018-08-30 22:22:52 -04:00
|
|
|
@headers.without('User-Agent', 'Accept-Encoding')
|
2017-07-14 14:41:49 -04:00
|
|
|
end
|
|
|
|
|
2017-08-09 17:54:14 -04:00
|
|
|
def key_id
|
2022-09-21 16:45:57 -04:00
|
|
|
ActivityPub::TagManager.instance.key_uri_for(@actor)
|
2017-08-09 17:54:14 -04:00
|
|
|
end
|
|
|
|
|
2017-07-14 14:41:49 -04:00
|
|
|
def http_client
|
2019-07-01 18:34:38 -04:00
|
|
|
@http_client ||= Request.http_client
|
2017-07-14 14:41:49 -04:00
|
|
|
end
|
2018-02-24 13:16:11 -05:00
|
|
|
|
2018-04-24 20:14:49 -04:00
|
|
|
def use_proxy?
|
2022-08-24 22:41:14 -04:00
|
|
|
proxy_url.present?
|
|
|
|
end
|
|
|
|
|
|
|
|
def proxy_url
|
|
|
|
if hidden_service? && Rails.configuration.x.http_client_hidden_proxy.present?
|
|
|
|
Rails.configuration.x.http_client_hidden_proxy
|
|
|
|
else
|
|
|
|
Rails.configuration.x.http_client_proxy
|
|
|
|
end
|
2018-04-24 20:14:49 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
def block_hidden_service?
|
2022-08-24 22:41:14 -04:00
|
|
|
!Rails.configuration.x.access_to_hidden_service && hidden_service?
|
|
|
|
end
|
|
|
|
|
|
|
|
def hidden_service?
|
|
|
|
/\.(onion|i2p)$/.match?(@url.host)
|
2018-04-24 20:14:49 -04:00
|
|
|
end
|
|
|
|
|
2018-03-26 08:02:10 -04:00
|
|
|
# Mixed into HTTP::Response: reads the body incrementally with a hard size
# cap, so a malicious or runaway response cannot exhaust memory.
module ClientLimit
  # Returns the full body as a String (encoded per the response charset,
  # falling back to binary). Raises Mastodon::LengthValidationError as soon
  # as the declared Content-Length or the accumulated bytes exceed +limit+.
  def body_with_limit(limit = 1.megabyte)
    raise Mastodon::LengthValidationError if content_length.present? && content_length > limit

    # Resolve the target encoding; unknown or missing charsets degrade to
    # binary rather than failing the whole request.
    encoding = begin
      charset.nil? ? Encoding::BINARY : Encoding.find(charset)
    rescue ArgumentError
      Encoding::BINARY
    end

    contents = String.new(encoding: encoding)

    while (chunk = readpartial)
      contents << chunk
      # Free the chunk's buffer eagerly; we only keep the accumulated copy.
      chunk.clear

      raise Mastodon::LengthValidationError if contents.bytesize > limit
    end

    contents
  end
end
|
|
|
|
|
2022-11-07 22:00:27 -05:00
|
|
|
# Install the ClientLimit monkey patch on HTTP::Response, but fail loudly
# (outside production) if the http.rb gem ever starts shipping its own
# #body_with_limit so the conflict gets noticed instead of silently shadowed.
#
# NOTE: the previous guard used `::HTTP::Response.methods.include?`, which
# inspects *class-level* methods — #body_with_limit is an instance method,
# so that guard could never fire. `method_defined?` checks instance methods.
# The additional `include?(Request::ClientLimit)` test keeps code reloading
# in development from tripping the guard on our own, already-applied patch.
if ::HTTP::Response.method_defined?(:body_with_limit) && !::HTTP::Response.include?(Request::ClientLimit) && !Rails.env.production?
  abort 'HTTP::Response#body_with_limit is already defined, the monkey patch will not be applied'
else
  class ::HTTP::Response
    include Request::ClientLimit
  end
end
|
|
|
|
|
2018-02-24 13:16:11 -05:00
|
|
|
class Socket < TCPSocket
|
|
|
|
class << self
|
|
|
|
# Resolves +host+ and connects to the first address that answers, with our
# own timeout handling (a 5s DNS timeout and Request::TIMEOUT[:connect] for
# the connect phase) instead of HTTP.rb's timeout block. Each candidate
# address is vetted by check_private_address before any connection attempt.
#
# Raises Mastodon::PrivateNetworkAddressError, HTTP::TimeoutError,
# SocketError ("No address for …") or whatever error the last failed
# attempt produced.
def open(host, *args)
  outer_e = nil
  port    = args.first

  # Literal IPs are used as-is; otherwise resolve via DNS and keep at most
  # two addresses per family, IPv6 first.
  addresses = []
  begin
    addresses = [IPAddr.new(host)]
  rescue IPAddr::InvalidAddressError
    Resolv::DNS.open do |dns|
      dns.timeouts = 5
      addresses = dns.getaddresses(host)
      addresses = addresses.filter { |addr| addr.is_a?(Resolv::IPv6) }.take(2) + addresses.filter { |addr| !addr.is_a?(Resolv::IPv6) }.take(2)
    end
  end

  socks = []
  addr_by_socket = {}

  addresses.each do |address|
    begin
      check_private_address(address, host)

      # Pick the right address family. `address` is either a Resolv address
      # (DNS path) or an IPAddr (literal path) — the previous check only
      # recognized Resolv::IPv6, so IPv6 *literals* were opened as AF_INET
      # sockets and could never connect.
      ipv6 = address.is_a?(Resolv::IPv6) || (address.is_a?(IPAddr) && address.ipv6?)

      sock     = ::Socket.new(ipv6 ? ::Socket::AF_INET6 : ::Socket::AF_INET, ::Socket::SOCK_STREAM, 0)
      sockaddr = ::Socket.pack_sockaddr_in(port, address.to_s)

      sock.setsockopt(::Socket::IPPROTO_TCP, ::Socket::TCP_NODELAY, 1)

      sock.connect_nonblock(sockaddr)

      # If that hasn't raised an exception, we somehow managed to connect
      # immediately, close pending sockets and return immediately
      socks.each(&:close)
      return sock
    rescue IO::WaitWritable
      # Connection is in progress — keep the socket around and race it
      # against the other candidates below.
      socks << sock
      addr_by_socket[sock] = sockaddr
    rescue => e
      outer_e = e
    end
  end

  until socks.empty?
    _, available_socks, = IO.select(nil, socks, nil, Request::TIMEOUT[:connect])

    if available_socks.nil?
      socks.each(&:close)
      raise HTTP::TimeoutError, "Connect timed out after #{Request::TIMEOUT[:connect]} seconds"
    end

    available_socks.each do |sock|
      socks.delete(sock)

      begin
        # Writability may mean "connected" or "failed" — retrying the
        # connect disambiguates.
        sock.connect_nonblock(addr_by_socket[sock])
      rescue Errno::EISCONN
        # Already connected — success.
      rescue => e
        sock.close
        outer_e = e
        next
      end

      socks.each(&:close)
      return sock
    end
  end

  if outer_e
    raise outer_e
  else
    raise SocketError, "No address for #{host}"
  end
end

# TCPSocket callers go through .new; route it into our checked open.
alias new open
|
2018-04-24 20:14:49 -04:00
|
|
|
|
2022-09-20 17:30:26 -04:00
|
|
|
# Raises Mastodon::PrivateNetworkAddressError (carrying +host+ as message)
# when +address+ falls in a private network range, unless it is covered by
# one of the configured exceptions.
def check_private_address(address, host)
  ip = IPAddr.new(address.to_s)
  return if private_address_exceptions.any? { |range| range.include?(ip) }

  raise Mastodon::PrivateNetworkAddressError, host if PrivateAddressCheck.private_address?(ip)
end
|
|
|
|
|
|
|
|
def private_address_exceptions
|
|
|
|
@private_address_exceptions = begin
|
|
|
|
(ENV['ALLOWED_PRIVATE_ADDRESSES'] || '').split(',').map { |addr| IPAddr.new(addr) }
|
|
|
|
end
|
2019-07-06 20:05:38 -04:00
|
|
|
end
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Socket variant used when the connection goes through an HTTP proxy: the
# private-address check is disabled, because proxies commonly listen on
# local/private addresses and it is the proxy — not us — that reaches the
# final destination.
class ProxySocket < Socket
  class << self
    # Intentionally a no-op; see class comment.
    def check_private_address(_address, _host)
      nil
    end
  end
end
|
|
|
|
|
2019-07-06 20:05:38 -04:00
|
|
|
private_constant :ClientLimit, :Socket, :ProxySocket
|
2017-07-14 14:41:49 -04:00
|
|
|
end
|